From 0a6ddb693bb9d17535c71419eb6c2e8e0c6b2f00 Mon Sep 17 00:00:00 2001
From: whynowy
Date: Mon, 23 Sep 2024 20:45:17 +0000
Subject: [PATCH] deploy: a92adbda49f2beba84a083ec755fd40d9e630e36
---
 APIs/index.html          | 23947 +++++++++++++++++++++++++++++++++++++
 search/search_index.json |     2 +-
 sitemap.xml              |   181 +-
 sitemap.xml.gz           |   Bin 249 -> 249 bytes
 4 files changed, 24041 insertions(+), 89 deletions(-)
 create mode 100644 APIs/index.html

diff --git a/APIs/index.html b/APIs/index.html
new file mode 100644
index 0000000000..fb079d7a7a
--- /dev/null
+++ b/APIs/index.html

APIs - Argo Events - The Event-Based Dependency Manager for Kubernetes

APIs

+ +

+ +Packages: +

+ + + +

+ +argoproj.io/v1alpha1 +

+ +

+ +

+ +

Resource Types:

+
    + +
+ +

+ +AMQPConsumeConfig +

+ +

+ +(Appears on: +AMQPEventSource) +

+ +

+ +

+ +AMQPConsumeConfig holds the configuration to immediately start delivering queued messages +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +consumerTag
string +
+ +(Optional) +

+ +ConsumerTag is the identity of the consumer included in every delivery +

+ +
+ +autoAck
bool +
+ +(Optional) +

+ +AutoAck when true, the server will acknowledge deliveries to this +consumer prior to writing the delivery to the network +

+ +
+ +exclusive
bool +
+ +(Optional) +

+ +Exclusive when true, the server will ensure that this is the sole +consumer from this queue +

+ +
+ +noLocal
bool +
+ +(Optional) +

+ +NoLocal flag is not supported by RabbitMQ +

+ +
+ +noWait
bool +
+ +(Optional) +

+ +NoWait when true, do not wait for the server to confirm the request and immediately begin deliveries +

+ +
+ +

+ +AMQPEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +AMQPEventSource refers to an event-source for AMQP stream events +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +url
string +
+ +

+ +URL for rabbitmq service +

+ +
+ +exchangeName
string +
+ +

+ +ExchangeName is the exchange name. For more information, visit https://www.rabbitmq.com/tutorials/amqp-concepts.html +

+ +
+ +exchangeType
string +
+ +

+ +ExchangeType is the rabbitmq exchange type +

+ +
+ +routingKey
string +
+ +

+ +Routing key for bindings +

+ +
+ +connectionBackoff
+ Backoff +
+ +(Optional) +

+ +Backoff holds parameters applied to connection. +

+ +
+ +jsonBody
bool +
+ +(Optional) +

+ +JSONBody specifies that all event body payload coming from this source +will be JSON +

+ +
+ +tls
+TLSConfig +
+ +(Optional) +

+ +TLS configuration for the amqp client. +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user defined metadata which will be passed along with the event payload. +

+ +
+ +exchangeDeclare
+ +AMQPExchangeDeclareConfig +
+ +(Optional) +

+ +ExchangeDeclare holds the configuration for the exchange on the server. For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.ExchangeDeclare +

+ +
+ +queueDeclare
+ +AMQPQueueDeclareConfig +
+ +(Optional) +

+ +QueueDeclare holds the configuration of a queue to hold messages and deliver to consumers. Declaring creates a queue if it doesn’t already exist, or ensures that an existing queue matches the same parameters. For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.QueueDeclare +

+ +
+ +queueBind
+ AMQPQueueBindConfig + +
+ +(Optional) +

+ +QueueBind holds the configuration that binds an exchange to a queue so that publishings to the exchange will be routed to the queue when the publishing routing key matches the binding routing key. For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.QueueBind +

+ +
+ +consume
+ AMQPConsumeConfig + +
+ +(Optional) +

+ +Consume holds the configuration to immediately start delivering queued messages. For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.Consume +

+ +
+ +auth
+BasicAuth +
+ +(Optional) +

+ +Auth hosts secret selectors for username and password +

+ +
+ +urlSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +URLSecret is secret reference for rabbitmq service URL +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +

+ +AMQPExchangeDeclareConfig +

+ +

+ +(Appears on: +AMQPEventSource) +

+ +

+ +

+ +AMQPExchangeDeclareConfig holds the configuration for the exchange on +the server +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +durable
bool +
+ +(Optional) +

+ +Durable keeps the exchange also after the server restarts +

+ +
+ +autoDelete
bool +
+ +(Optional) +

+ +AutoDelete removes the exchange when no bindings are active +

+ +
+ +internal
bool +
+ +(Optional) +

+ +Internal when true does not accept publishings +

+ +
+ +noWait
bool +
+ +(Optional) +

+ +NoWait when true does not wait for a confirmation from the server +

+ +
+ +

+ +AMQPQueueBindConfig +

+ +

+ +(Appears on: +AMQPEventSource) +

+ +

+ +

+ +AMQPQueueBindConfig holds the configuration that binds an exchange to a +queue so that publishings to the exchange will be routed to the queue +when the publishing routing key matches the binding routing key +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +noWait
bool +
+ +(Optional) +

+ +NoWait when false, if the queue could not be bound the channel will be closed with an error +

+ +
+ +

+ +AMQPQueueDeclareConfig +

+ +

+ +(Appears on: +AMQPEventSource) +

+ +

+ +

+ +AMQPQueueDeclareConfig holds the configuration of a queue to hold +messages and deliver to consumers. Declaring creates a queue if it +doesn’t already exist, or ensures that an existing queue matches the +same parameters +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +name
string +
+ +(Optional) +

+ +Name of the queue. If empty the server auto-generates a unique name for +this queue +

+ +
+ +durable
bool +
+ +(Optional) +

+ +Durable keeps the queue also after the server restarts +

+ +
+ +autoDelete
bool +
+ +(Optional) +

+ +AutoDelete removes the queue when no consumers are active +

+ +
+ +exclusive
bool +
+ +(Optional) +

+ +Exclusive sets the queues to be accessible only by the connection that declares them and will be deleted when the connection closes +

+ +
+ +noWait
bool +
+ +(Optional) +

+ +NoWait when true, the queue is assumed to already be declared on the server +

+ +
+ +arguments
string +
+ +(Optional) +

+ +Arguments of a queue (also known as “x-arguments”) used for optional +features and plugins +

+ +
+ +

+ +AWSLambdaTrigger +

+ +

+ +(Appears on: +TriggerTemplate) +

+ +

+ +

+ +AWSLambdaTrigger refers to specification of the trigger to invoke an AWS +Lambda function +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +functionName
string +
+ +

+ +FunctionName refers to the name of the function to invoke. +

+ +
+ +accessKey
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +AccessKey refers to a K8s secret containing the AWS access key +

+ +
+ +secretKey
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +SecretKey refers to a K8s secret containing the AWS secret key +

+ +
+ +region
string +
+ +

+ +Region is the AWS region +

+ +
+ +payload
+ \[\]TriggerParameter + +
+ +

+ +Payload is the list of key-value extracted from an event payload to +construct the request payload. +

+ +
+ +parameters
+ \[\]TriggerParameter + +
+ +(Optional) +

+ +Parameters is the list of key-value extracted from event’s payload that +are applied to the trigger resource. +

+ +
+ +invocationType
string +
+ +(Optional) +

+ +Choose from the following options. +

+ +
+  • RequestResponse (default) - Invoke the function synchronously. Keep the connection open until the function returns a response or times out. The API response includes the function response and additional data.
+  • Event - Invoke the function asynchronously. Send events that fail multiple times to the function’s dead-letter queue (if it’s configured). The API response only includes a status code.
+  • DryRun - Validate parameter values and verify that the user or role has permission to invoke the function.
+ +
+ +roleARN
string +
+ +(Optional) +

+ +RoleARN is the Amazon Resource Name (ARN) of the role to assume. +

+ +
+ +

+ +Amount +

+ +

+ +(Appears on: +Backoff) +

+ +

+ +

+ +Amount represents a numeric amount. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +value
\[\]byte +
+ +
+ +

+ +ArgoWorkflowOperation (string alias) +

+ +

+ +

+ +(Appears on: +ArgoWorkflowTrigger) +

+ +

+ +

+ +ArgoWorkflowOperation refers to the type of the operation performed on +the Argo Workflow +

+ +

+ +

+ +ArgoWorkflowTrigger +

+ +

+ +(Appears on: +TriggerTemplate) +

+ +

+ +

+ +ArgoWorkflowTrigger is the trigger for the Argo Workflow +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +source
+ ArtifactLocation + +
+ +

+ +Source of the K8s resource file(s) +

+ +
+ +operation
+ +ArgoWorkflowOperation +
+ +(Optional) +

+ +Operation refers to the type of operation performed on the Argo Workflow resource. Default value is Submit. +

+ +
+ +parameters
+ \[\]TriggerParameter + +
+ +

+ +Parameters is the list of parameters to pass to the resolved Argo Workflow object +

+ +
+ +args
\[\]string +
+ +

+ +Args is the list of arguments to pass to the argo CLI +

+ +
+ +

+ +ArtifactLocation +

+ +

+ +(Appears on: +ArgoWorkflowTrigger, +StandardK8STrigger) +

+ +

+ +

+ +ArtifactLocation describes the source location for an external artifact +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +s3
+S3Artifact +
+ +

+ +S3 compliant artifact +

+ +
+ +inline
string +
+ +

+ +Inline artifact is embedded in sensor spec as a string +

+ +
+ +file
+ FileArtifact +
+ +

+ +File artifact is artifact stored in a file +

+ +
+ +url
+URLArtifact +
+ +

+ +URL to fetch the artifact from +

+ +
+ +configmap
+ +Kubernetes core/v1.ConfigMapKeySelector +
+ +

+ +Configmap that stores the artifact +

+ +
+ +git
+GitArtifact +
+ +

+ +Git repository hosting the artifact +

+ +
+ +resource
+ K8SResource +
+ +

+ +Resource is generic template for K8s resource +

+ +
+ +

+ +AuthStrategy (string alias) +

+ +

+ +

+ +(Appears on: +NATSConfig, +NativeStrategy) +

+ +

+ +

+ +AuthStrategy is the auth strategy of a native NATS installation +

+ +

+ +

+ +AzureEventHubsTrigger +

+ +

+ +(Appears on: +TriggerTemplate) +

+ +

+ +

+ +AzureEventHubsTrigger refers to specification of the Azure Event Hubs +Trigger +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +fqdn
string +
+ +

+ +FQDN refers to the namespace DNS of Azure Event Hubs to be used, i.e. <namespace>.servicebus.windows.net +

+ +
+ +hubName
string +
+ +

+ +HubName refers to the Azure Event Hub to send events to +

+ +
+ +sharedAccessKeyName
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +SharedAccessKeyName refers to the name of the Shared Access Key +

+ +
+ +sharedAccessKey
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +SharedAccessKey refers to a K8s secret containing the primary key for the Shared Access Key Name +

+ +
+ +payload
+ \[\]TriggerParameter + +
+ +

+ +Payload is the list of key-value extracted from an event payload to +construct the request payload. +

+ +
+ +parameters
+ \[\]TriggerParameter + +
+ +(Optional) +

+ +Parameters is the list of key-value extracted from event’s payload that +are applied to the trigger resource. +

+ +
+ +

+ +AzureEventsHubEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +AzureEventsHubEventSource describes the event source for Azure Event Hubs. More info at https://docs.microsoft.com/en-us/azure/event-hubs/ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +fqdn
string +
+ +

+ +FQDN of the EventHubs namespace you created. More info at https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string +

+ +
+ +sharedAccessKeyName
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +SharedAccessKeyName is the name you chose for your application’s SAS +keys +

+ +
+ +sharedAccessKey
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +SharedAccessKey is the generated value of the key +

+ +
+ +hubName
string +
+ +

+ +Event Hub path/name +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user defined metadata which will be passed along with the event payload. +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +

+ +AzureQueueStorageEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +AzureQueueStorageEventSource describes the event source for Azure Queue Storage. More info at https://learn.microsoft.com/en-us/azure/storage/queues/ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +storageAccountName
string +
+ +(Optional) +

+ +StorageAccountName is the name of the storage account where the queue +is. This field is necessary to access via Azure AD (managed identity) +and it is ignored if ConnectionString is set. +

+ +
+ +connectionString
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +ConnectionString is the connection string to access Azure Queue Storage. If this field is not provided, it will try to access via Azure AD with StorageAccountName. +

+ +
+ +queueName
string +
+ +

+ +QueueName is the name of the queue +

+ +
+ +jsonBody
bool +
+ +(Optional) +

+ +JSONBody specifies that all event body payload coming from this source +will be JSON +

+ +
+ +dlq
bool +
+ +(Optional) +

+ +DLQ specifies if a dead-letter queue is configured for messages that can’t be processed successfully. If set to true, messages with invalid payload won’t be acknowledged, allowing them to be forwarded to the dead-letter queue. The default value is false. +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user defined metadata which will be passed along with the event payload. +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +decodeMessage
bool +
+ +(Optional) +

+ +DecodeMessage specifies if all the messages should be base64 decoded. If +set to true the decoding is done before the evaluation of JSONBody +

+ +
+ +waitTimeInSeconds
int32 +
+ +(Optional) +

+ +WaitTimeInSeconds is the duration (in seconds) for which the event +source waits between empty results from the queue. The default value is +3 seconds. +

+ +
+ +

+ +AzureServiceBusEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +AzureServiceBusEventSource describes the event source for Azure Service Bus. More info at https://docs.microsoft.com/en-us/azure/service-bus-messaging/ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +connectionString
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +ConnectionString is the connection string for the Azure Service Bus. If this field is not provided, it will try to access via Azure AD with DefaultAzureCredential and FullyQualifiedNamespace. +

+ +
+ +queueName
string +
+ +

+ +QueueName is the name of the Azure Service Bus Queue +

+ +
+ +topicName
string +
+ +

+ +TopicName is the name of the Azure Service Bus Topic +

+ +
+ +subscriptionName
string +
+ +

+ +SubscriptionName is the name of the Azure Service Bus Topic Subscription +

+ +
+ +tls
+TLSConfig +
+ +(Optional) +

+ +TLS configuration for the service bus client +

+ +
+ +jsonBody
bool +
+ +(Optional) +

+ +JSONBody specifies that all event body payload coming from this source +will be JSON +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user defined metadata which will be passed along with the event payload. +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +fullyQualifiedNamespace
string +
+ +(Optional) +

+ +FullyQualifiedNamespace is the Service Bus namespace name (ex: +myservicebus.servicebus.windows.net). This field is necessary to access +via Azure AD (managed identity) and it is ignored if ConnectionString is +set. +

+ +
+ +

+ +AzureServiceBusTrigger +

+ +

+ +(Appears on: +TriggerTemplate) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +connectionString
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +ConnectionString is the connection string for the Azure Service Bus +

+ +
+ +queueName
string +
+ +

+ +QueueName is the name of the Azure Service Bus Queue +

+ +
+ +topicName
string +
+ +

+ +TopicName is the name of the Azure Service Bus Topic +

+ +
+ +subscriptionName
string +
+ +

+ +SubscriptionName is the name of the Azure Service Bus Topic Subscription +

+ +
+ +tls
+TLSConfig +
+ +(Optional) +

+ +TLS configuration for the service bus client +

+ +
+ +payload
+ \[\]TriggerParameter + +
+ +

+ +Payload is the list of key-value extracted from an event payload to +construct the request payload. +

+ +
+ +parameters
+ \[\]TriggerParameter + +
+ +(Optional) +

+ +Parameters is the list of key-value extracted from event’s payload that +are applied to the trigger resource. +

+ +
+ +

+ +Backoff +

+ +

+ +(Appears on: +AMQPEventSource, +EmitterEventSource, +K8SResourcePolicy, +KafkaEventSource, +MQTTEventSource, +NATSEventsSource, +NSQEventSource, +PulsarEventSource, +PulsarTrigger, +Trigger) +

+ +

+ +

+ +Backoff for an operation +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +duration
+ Int64OrString +
+ +(Optional) +

+ +The initial duration in nanoseconds or strings like “1s”, “3m” +

+ +
+ +factor
+Amount +
+ +(Optional) +

+ +Duration is multiplied by factor each iteration +

+ +
+ +jitter
+Amount +
+ +(Optional) +

+ +The amount of jitter applied each iteration +

+ +
+ +steps
int32 +
+ +(Optional) +

+ +Exit with error after this many steps +

+ +
+ +

+ +BasicAuth +

+ +

+ +(Appears on: +AMQPEventSource, +GerritEventSource, +HTTPTrigger, +MQTTEventSource, +NATSAuth, +SchemaRegistryConfig) +

+ +

+ +

+ +BasicAuth contains the reference to K8s secrets that holds the username +and password +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +username
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +Username refers to the Kubernetes secret that holds the username +required for basic auth. +

+ +
+ +password
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +Password refers to the Kubernetes secret that holds the password +required for basic auth. +

+ +
+ +

+ +BitbucketAuth +

+ +

+ +(Appears on: +BitbucketEventSource) +

+ +

+ +

+ +BitbucketAuth holds the different auth strategies for connecting to +Bitbucket +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +basic
+ BitbucketBasicAuth + +
+ +(Optional) +

+ +Basic is BasicAuth auth strategy. +

+ +
+ +oauthToken
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +OAuthToken refers to the K8s secret that holds the OAuth Bearer token. +

+ +
+ +

+ +BitbucketBasicAuth +

+ +

+ +(Appears on: +BitbucketAuth) +

+ +

+ +

+ +BitbucketBasicAuth holds the information required to authenticate a user via the basic auth mechanism +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +username
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +Username refers to the K8s secret that holds the username. +

+ +
+ +password
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +Password refers to the K8s secret that holds the password. +

+ +
+ +

+ +BitbucketEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +BitbucketEventSource describes the event source for Bitbucket +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +deleteHookOnFinish
bool +
+ +(Optional) +

+ +DeleteHookOnFinish determines whether to delete the defined Bitbucket +hook once the event source is stopped. +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user defined metadata which will be passed along with the event payload. +

+ +
+ +webhook
+ WebhookContext + +
+ +

+ +Webhook refers to the configuration required to run an http server +

+ +
+ +auth
+ BitbucketAuth +
+ +

+ +Auth information required to connect to Bitbucket. +

+ +
+ +events
\[\]string +
+ +

+ +Events this webhook is subscribed to. +

+ +
+ +owner
string +
+ +(Optional) +

+ +DeprecatedOwner is the owner of the repository. Deprecated: use +Repositories instead. Will be unsupported in v1.9 +

+ +
+ +projectKey
string +
+ +(Optional) +

+ +DeprecatedProjectKey is the key of the project to which the repository relates. Deprecated: use Repositories instead. Will be unsupported in v1.9 +

+ +
+ +repositorySlug
string +
+ +(Optional) +

+ +DeprecatedRepositorySlug is a URL-friendly version of a repository name, automatically generated by Bitbucket for use in the URL. Deprecated: use Repositories instead. Will be unsupported in v1.9 +

+ +
+ +repositories
+ +\[\]BitbucketRepository +
+ +(Optional) +

+ +Repositories holds a list of repositories for which the integration needs to be set up +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +

+ +BitbucketRepository +

+ +

+ +(Appears on: +BitbucketEventSource) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +owner
string +
+ +

+ +Owner is the owner of the repository +

+ +
+ +repositorySlug
string +
+ +

+ +RepositorySlug is a URL-friendly version of a repository name, +automatically generated by Bitbucket for use in the URL +

+ +
+ +

+ +BitbucketServerEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +BitbucketServerEventSource refers to event-source related to Bitbucket +Server events +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +webhook
+ WebhookContext + +
+ +

+ +Webhook holds configuration to run an HTTP server. +

+ +
+ +projectKey
string +
+ +(Optional) +

+ +DeprecatedProjectKey is the key of the project for which the integration needs to be set up. Deprecated: use Repositories instead. Will be unsupported in v1.8. +

+ +
+ +repositorySlug
string +
+ +(Optional) +

+ +DeprecatedRepositorySlug is the slug of the repository for which the integration needs to be set up. Deprecated: use Repositories instead. Will be unsupported in v1.8. +

+ +
+ +projects
\[\]string +
+ +(Optional) +

+ +Projects holds a list of projects for which the integration needs to be set up; this will add the webhook to all repositories in the project. +

+ +
+ +repositories
+ +\[\]BitbucketServerRepository +
+ +(Optional) +

+ +Repositories holds a list of repositories for which the integration needs to be set up. +

+ +
+ +events
\[\]string +
+ +(Optional) +

+ +Events are the Bitbucket events to listen to. Refer https://confluence.atlassian.com/bitbucketserver/event-payload-938025882.html +

+ +
+ +skipBranchRefsChangedOnOpenPR
bool +
+ +(Optional) +

+ +SkipBranchRefsChangedOnOpenPR bypasses the event repo:refs_changed for +branches whenever there’s an associated open pull request. This helps in +optimizing the event handling process by avoiding unnecessary triggers +for branch reference changes that are already part of a pull request +under review. +

+ +
+ +accessToken
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +AccessToken is a reference to a K8s secret which holds the Bitbucket API access information. +

+ +
+ +webhookSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +WebhookSecret is a reference to a K8s secret which holds the Bitbucket webhook secret (for HMAC validation). +

+ +
+ +bitbucketserverBaseURL
string +
+ +

+ +BitbucketServerBaseURL is the base URL for API requests to a custom +endpoint. +

+ +
+ +deleteHookOnFinish
bool +
+ +(Optional) +

+ +DeleteHookOnFinish determines whether to delete the Bitbucket Server +hook for the project once the event source is stopped. +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user defined metadata which will be passed along with the event payload. +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +tls
+TLSConfig +
+ +(Optional) +

+ +TLS configuration for the bitbucketserver client. +

+ +
+ +checkInterval
string +
+ +(Optional) +

+ +CheckInterval is a duration in which to wait before checking that the +webhooks exist, e.g. 1s, 30m, 2h… (defaults to 1m) +

+ +
+ +

+ +BitbucketServerRepository +

+ +

+ +(Appears on: +BitbucketServerEventSource) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +projectKey
string +
+ +

+ +ProjectKey is the key of the project for which the integration needs to be set up. +

+ +
+ +repositorySlug
string +
+ +

+ +RepositorySlug is the slug of the repository for which the integration needs to be set up. +

+ +
+ +

+ +BusConfig +

+ +

+ +(Appears on: +EventBusStatus) +

+ +

+ +

+ +BusConfig has the finalized configuration for EventBus +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +nats
+NATSConfig +
+ +(Optional) +
+ +jetstream
+ JetStreamConfig + +
+ +(Optional) +
+ +kafka
+KafkaBus +
+ +(Optional) +
+ +

+ +CalendarEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +CalendarEventSource describes a time based dependency. One of the fields +(schedule, interval, or recurrence) must be passed. Schedule takes +precedence over interval; interval takes precedence over recurrence +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +schedule
string +
+ +(Optional) +

+ +Schedule is a cron-like expression. For reference, see: +https://en.wikipedia.org/wiki/Cron +

+ +
+ +interval
string +
+ +(Optional) +

+ +Interval is a string that describes an interval duration, e.g. 1s, 30m, +2h… +

+ +
+ +exclusionDates
\[\]string +
+ +

+ +ExclusionDates defines the list of DATE-TIME exceptions for recurring +events. +

+ +
+ +timezone
string +
+ +(Optional) +

+ +Timezone in which to run the schedule +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user defined metadata which will be passed along with the event payload. +

+ +
+ +persistence
+ EventPersistence + +
+ +

+ +Persistence holds the configuration for event persistence +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +

+ +CatchupConfiguration +

+ +

+ +(Appears on: +EventPersistence) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +enabled
bool +
+ +

+ +Enabled enables triggering the missed schedules when the eventsource restarts +

+ +
+ +maxDuration
string +
+ +

+ +MaxDuration holds max catchup duration +

+ +
+ +

+ +Comparator (string alias) +

+ +

+ +

+ +(Appears on: +DataFilter) +

+ +

+ +

+ +Comparator refers to the comparator operator for a data filter +

+ +

+ +

+ +Condition +

+ +

+ +(Appears on: Status) +

+ +

+ +

+ +Condition contains details about resource state +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +type
+ ConditionType +
+ +

+ +Condition type. +

+ +
+ +status
+ +Kubernetes core/v1.ConditionStatus +
+ +

+ +Condition status, True, False or Unknown. +

+ +
+ +lastTransitionTime
+ +Kubernetes meta/v1.Time +
+ +(Optional) +

+ +Last time the condition transitioned from one status to another. +

+ +
+ +reason
string +
+ +(Optional) +

+ +Unique; this should be a short, machine-understandable string that gives the reason for the condition’s last transition. For example, “ImageNotFound” +

+ +
+ +message
string +
+ +(Optional) +

+ +Human-readable message indicating details about last transition. +

+ +
+ +

+ +ConditionType (string alias) +

+ +

+ +

+ +(Appears on: +Condition) +

+ +

+ +

+ +ConditionType is a valid value of Condition.Type +

+ +

+ +

+ +ConditionsResetByTime +

+ +

+ +(Appears on: +ConditionsResetCriteria) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +cron
string +
+ +

+ +Cron is a cron-like expression. For reference, see: +https://en.wikipedia.org/wiki/Cron +

+ +
+ +timezone
string +
+ +(Optional) +
+ +

+ +ConditionsResetCriteria +

+ +

+ +(Appears on: +TriggerTemplate) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +byTime
+ +ConditionsResetByTime +
+ +

+ +Schedule is a cron-like expression. For reference, see: +https://en.wikipedia.org/wiki/Cron +

+ +
+ +

+ +ConfigMapPersistence +

+ +

+ +(Appears on: +EventPersistence) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +name
string +
+ +

+ +Name of the configmap +

+ +
+ +createIfNotExist
bool +
+ +

+ +CreateIfNotExist will create the configmap if it doesn’t exist +

+ +
+ +

+ +ContainerTemplate +

+ +

+ +(Appears on: +JetStreamBus, +NativeStrategy) +

+ +

+ +

+ +ContainerTemplate defines customized spec for a container +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +resources
+ +Kubernetes core/v1.ResourceRequirements +
+ +
+ +imagePullPolicy
+ +Kubernetes core/v1.PullPolicy +
+ +
+ +securityContext
+ +Kubernetes core/v1.SecurityContext +
+ +
+ +

+ +CustomTrigger +

+ +

+ +(Appears on: +TriggerTemplate) +

+ +

+ +

+ +CustomTrigger refers to the specification of the custom trigger. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +serverURL
string +
+ +

+ +ServerURL is the URL of the gRPC server that executes the custom trigger +

+ +
+ +secure
bool +
+ +

+ +Secure refers to the type of connection between the sensor and the custom trigger gRPC server +

+ +
+ +certSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +CertSecret refers to the secret that contains cert for secure connection +between sensor and custom trigger gRPC server. +

+ +
+ +serverNameOverride
string +
+ +

+ +ServerNameOverride for the secure connection between sensor and custom +trigger gRPC server. +

+ +
+ +spec
map\[string\]string +
+ +

+ +Spec is the custom trigger resource specification that custom trigger +gRPC server knows how to interpret. +

+ +

+ + +
+ +
+ +parameters
+ \[\]TriggerParameter + +
+ +

+ +Parameters is the list of parameters that are applied to the resolved custom trigger object. +

+ +
+ +payload
+ \[\]TriggerParameter + +
+ +

+ +Payload is the list of key-value extracted from an event payload to +construct the request payload. +

+ +
+ +

+ +DataFilter +

+ +

+ +(Appears on: +EventDependencyFilter) +

+ +

+ +

+ +DataFilter describes constraints and filters for event data. Regular expressions are purposefully not a feature as they are overkill for our uses here. See Rob Pike’s post: https://commandcenter.blogspot.com/2011/08/regular-expressions-in-lexing-and.html +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +path
string +
+ +

+ +Path is the JSONPath of the event’s (JSON decoded) data key. Path is a series of keys separated by a dot. A key may contain wildcard characters ‘*’ and ‘?’. To access an array value, use the index as the key. The dot and wildcard characters can be escaped with ‘\’. See https://github.com/tidwall/gjson#path-syntax for more information on how to use this. +

+ +
+ +type
+JSONType +
+ +

+ +Type contains the JSON type of the data +

+ +
+ +value
\[\]string +
+ +

+ +Value is the allowed string values for this key. Booleans are parsed using strconv.ParseBool(), numbers are parsed as float64 using strconv.ParseFloat(), strings are taken as-is, and nils are ignored. +

+ +
+ +comparator
+ Comparator +
+ +

+ +Comparator compares the event data with a user given value. Can be “>=”, “>”, “=”, “!=”, “<”, or “<=”. It is optional; if left blank it is treated as equality “=”. +

+ +
+ +template
string +
+ +

+ +Template is a go-template for extracting a string from the event’s data. +A Template is evaluated with provided path, type and value. The +templating follows the standard go-template syntax as well as sprig’s +extra functions. See +https://pkg.go.dev/text/template +and +https://masterminds.github.io/sprig/ +

+ +
+ +

+ +EmailTrigger +

+ +

+ +(Appears on: +TriggerTemplate) +

+ +

+ +

+ +EmailTrigger refers to the specification of the email notification +trigger. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +parameters
+ \[\]TriggerParameter + +
+ +(Optional) +

+ +Parameters is the list of key-value extracted from event’s payload that +are applied to the trigger resource. +

+ +
+ +username
string +
+ +(Optional) +

+ +Username refers to the username used to connect to the smtp server. +

+ +
+ +smtpPassword
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +SMTPPassword refers to the Kubernetes secret that holds the smtp +password used to connect to smtp server. +

+ +
+ +host
string +
+ +

+ +Host refers to the SMTP host URL to which the email is sent. +

+ +
+ +port
int32 +
+ +(Optional) +

+ +Port refers to the SMTP server port to which the email is sent. Defaults to 0. +

+ +
+ +to
\[\]string +
+ +(Optional) +

+ +To refers to the email addresses to which the emails are sent. +

+ +
+ +from
string +
+ +(Optional) +

+ +From refers to the address from which the email is sent. +

+ +
+ +subject
string +
+ +(Optional) +

+ +Subject refers to the subject line for the email being sent. +

+ +
+ +body
string +
+ +(Optional) +

+ +Body refers to the body/content of the email being sent. +

+ +
+ +

+ +EmitterEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +EmitterEventSource describes the event source for Emitter. More info at https://emitter.io/develop/getting-started/ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +broker
string +
+ +

+ +Broker URI to connect to. +

+ +
+ +channelKey
string +
+ +

+ +ChannelKey refers to the channel key +

+ +
+ +channelName
string +
+ +

+ +ChannelName refers to the channel name +

+ +
+ +username
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +Username to use to connect to broker +

+ +
+ +password
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +Password to use to connect to broker +

+ +
+ +connectionBackoff
+ Backoff +
+ +(Optional) +

+ +Backoff holds parameters applied to connection. +

+ +
+ +jsonBody
bool +
+ +(Optional) +

+ +JSONBody specifies that all event body payload coming from this source +will be JSON +

+ +
+ +tls
+TLSConfig +
+ +(Optional) +

+ +TLS configuration for the emitter client. +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user defined metadata which will be passed along with the event payload. +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +

+ +Event +

+ +

+ +

+ +Event represents the cloudevent received from an event source. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +context
+ EventContext +
+ +
+ +data
\[\]byte +
+ +
+ +

+ +EventBus +

+ +

+ +

+ +EventBus is the definition of an eventbus resource +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +metadata
+ +Kubernetes meta/v1.ObjectMeta +
+ +Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+ +spec
+ EventBusSpec +
+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +nats
+NATSBus +
+ +(Optional) +

+ +NATS eventbus +

+ +
+ +jetstream
+ JetStreamBus +
+ +(Optional) +
+ +kafka
+KafkaBus +
+ +(Optional) +

+ +Kafka eventbus +

+ +
+ +jetstreamExotic
+ JetStreamConfig + +
+ +(Optional) +

+ +Exotic JetStream +

+ +
+ +
+ +status
+ EventBusStatus + +
+ +(Optional) +
+ +

+ +EventBusSpec +

+ +

+ +(Appears on: +EventBus) +

+ +

+ +

+ +EventBusSpec refers to specification of eventbus resource +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +nats
+NATSBus +
+ +(Optional) +

+ +NATS eventbus +

+ +
+ +jetstream
+ JetStreamBus +
+ +(Optional) +
+ +kafka
+KafkaBus +
+ +(Optional) +

+ +Kafka eventbus +

+ +
+ +jetstreamExotic
+ JetStreamConfig + +
+ +(Optional) +

+ +Exotic JetStream +

+ +
+ +

+ +EventBusStatus +

+ +

+ +(Appears on: +EventBus) +

+ +

+ +

+ +EventBusStatus holds the status of the eventbus resource +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +Status
+Status +
+ +

+ +(Members of Status are embedded into this type.) +

+ +
+ +config
+BusConfig +
+ +

+ +Config holds the finalized configuration of EventBus +

+ +
+ +

+ +EventBusType (string alias) +

+ +

+ +

+ +

+ +EventBusType is the type of event bus +

+ +

+ +

+ +EventContext +

+ +

+ +(Appears on: Event, +EventDependencyFilter) +

+ +

+ +

+ +EventContext holds the context of the cloudevent received from an event +source. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +id
string +
+ +

+ +ID of the event; must be non-empty and unique within the scope of the +producer. +

+ +
+ +source
string +
+ +

+ +Source - A URI describing the event producer. +

+ +
+ +specversion
string +
+ +

+ +SpecVersion - The version of the CloudEvents specification used by the +event. +

+ +
+ +type
string +
+ +

+ +Type - The type of the occurrence which has happened. +

+ +
+ +datacontenttype
string +
+ +

+ +DataContentType - A MIME (RFC2046) string describing the media type of +data. +

+ +
+ +subject
string +
+ +

+ +Subject - The subject of the event in the context of the event producer +

+ +
+ +time
+ +Kubernetes meta/v1.Time +
+ +

+ +Time - A Timestamp when the event happened. +

+ +
+ +

+ +EventDependency +

+ +

+ +(Appears on: +SensorSpec) +

+ +

+ +

+ +EventDependency describes a dependency +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +name
string +
+ +

+ +Name is a unique name of this dependency +

+ +
+ +eventSourceName
string +
+ +

+ +EventSourceName is the name of EventSource that Sensor depends on +

+ +
+ +eventName
string +
+ +

+ +EventName is the name of the event +

+ +
+ +filters
+ +EventDependencyFilter +
+ +

+ +Filters and rules governing toleration of success and constraints on the +context and data of an event +

+ +
+ +transform
+ +EventDependencyTransformer +
+ +

+ +Transform transforms the event data +

+ +
+ +filtersLogicalOperator
+ LogicalOperator + +
+ +

+ +FiltersLogicalOperator defines how different filters are evaluated together. Available values: and (&&), or (||). It is optional; if left blank it is treated as and (&&). +

+ +
+ +

+ +EventDependencyFilter +

+ +

+ +(Appears on: +EventDependency) +

+ +

+ +

+ +EventDependencyFilter defines filters and constraints for a event. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +time
+TimeFilter +
+ +

+ +Time filter on the event with escalation +

+ +
+ +context
+ EventContext +
+ +

+ +Context filter constraints +

+ +
+ +data
+\[\]DataFilter +
+ +

+ +Data filter constraints with escalation +

+ +
+ +exprs
+\[\]ExprFilter +
+ +

+ +Exprs contains the list of expressions evaluated against the event +payload. +

+ +
+ +dataLogicalOperator
+ LogicalOperator + +
+ +

+ +DataLogicalOperator defines how multiple Data filters (if defined) are evaluated together. Available values: and (&&), or (||). It is optional; if left blank it is treated as and (&&). +

+ +
+ +exprLogicalOperator
+ LogicalOperator + +
+ +

+ +ExprLogicalOperator defines how multiple Exprs filters (if defined) are evaluated together. Available values: and (&&), or (||). It is optional; if left blank it is treated as and (&&). +

+ +
+ +script
string +
+ +

+ +Script refers to a Lua script evaluated to determine the validity of an +event. +

+ +
+ +

+ +EventDependencyTransformer +

+ +

+ +(Appears on: +EventDependency) +

+ +

+ +

+ +EventDependencyTransformer transforms the event +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +jq
string +
+ +(Optional) +

+ +JQ holds the jq command applied for transformation +

+ +
+ +script
string +
+ +(Optional) +

+ +Script refers to a Lua script used to transform the event +

+ +
+ +

+ +EventPersistence +

+ +

+ +(Appears on: +CalendarEventSource) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +catchup
+ +CatchupConfiguration +
+ +

+ +Catchup enables triggering the missed schedules when the eventsource restarts +

+ +
+ +configMap
+ +ConfigMapPersistence +
+ +

+ +ConfigMap holds configmap details for persistence +

+ +
+ +

+ +EventSource +

+ +

+ +

+ +EventSource is the definition of an eventsource resource +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +metadata
+ +Kubernetes meta/v1.ObjectMeta +
+ +Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+ +spec
+ EventSourceSpec + +
+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +eventBusName
string +
+ +

+ +EventBusName references an EventBus name. By default the value is “default”. +

+ +
+ +template
+ Template +
+ +(Optional) +

+ +Template is the pod specification for the event source +

+ +
+ +service
+Service +
+ +(Optional) +

+ +Service is the specification of the service that exposes the event source +

+ +
+ +minio
+map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.S3Artifact + +
+ +

+ +Minio event sources +

+ +
+ +calendar
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.CalendarEventSource + +
+ +

+ +Calendar event sources +

+ +
+ +file
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.FileEventSource + +
+ +

+ +File event sources +

+ +
+ +resource
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.ResourceEventSource + +
+ +

+ +Resource event sources +

+ +
+ +webhook
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.WebhookEventSource + +
+ +

+ +Webhook event sources +

+ +
+ +amqp
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.AMQPEventSource + +
+ +

+ +AMQP event sources +

+ +
+ +kafka
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.KafkaEventSource + +
+ +

+ +Kafka event sources +

+ +
+ +mqtt
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.MQTTEventSource + +
+ +

+ +MQTT event sources +

+ +
+ +nats
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.NATSEventsSource + +
+ +

+ +NATS event sources +

+ +
+ +sns
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.SNSEventSource + +
+ +

+ +SNS event sources +

+ +
+ +sqs
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.SQSEventSource + +
+ +

+ +SQS event sources +

+ +
+ +pubSub
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.PubSubEventSource + +
+ +

+ +PubSub event sources +

+ +
+ +github
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.GithubEventSource + +
+ +

+ +Github event sources +

+ +
+ +gitlab
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.GitlabEventSource + +
+ +

+ +Gitlab event sources +

+ +
+ +hdfs
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.HDFSEventSource + +
+ +

+ +HDFS event sources +

+ +
+ +slack
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.SlackEventSource + +
+ +

+ +Slack event sources +

+ +
+ +storageGrid
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.StorageGridEventSource + +
+ +

+ +StorageGrid event sources +

+ +
+ +azureEventsHub
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.AzureEventsHubEventSource + +
+ +

+ +AzureEventsHub event sources +

+ +
+ +stripe
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.StripeEventSource + +
+ +

+ +Stripe event sources +

+ +
+ +emitter
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.EmitterEventSource + +
+ +

+ +Emitter event source +

+ +
+ +redis
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.RedisEventSource + +
+ +

+ +Redis event source +

+ +
+ +nsq
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.NSQEventSource + +
+ +

+ +NSQ event source +

+ +
+ +pulsar
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.PulsarEventSource + +
+ +

+ +Pulsar event source +

+ +
+ +generic
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.GenericEventSource + +
+ +

+ +Generic event source +

+ +
+ +replicas
int32 +
+ +

+ +Replicas is the event source deployment replicas +

+ +
+ +bitbucketserver
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.BitbucketServerEventSource + +
+ +

+ +Bitbucket Server event sources +

+ +
+ +bitbucket
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.BitbucketEventSource + +
+ +

+ +Bitbucket event sources +

+ +
+ +redisStream
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.RedisStreamEventSource + +
+ +

+ +Redis stream source +

+ +
+ +azureServiceBus
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.AzureServiceBusEventSource + +
+ +

+ +Azure Service Bus event source +

+ +
+ +azureQueueStorage
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.AzureQueueStorageEventSource + +
+ +

+ +AzureQueueStorage event source +

+ +
+ +sftp
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.SFTPEventSource + +
+ +

+ +SFTP event sources +

+ +
+ +gerrit
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.GerritEventSource + +
+ +

+ +Gerrit event source +

+ +
+ +
+ +status
+ EventSourceStatus + +
+ +(Optional) +
+ +

+ +EventSourceFilter +

+ +

+ +(Appears on: +AMQPEventSource, +AzureEventsHubEventSource, +AzureQueueStorageEventSource, +AzureServiceBusEventSource, +BitbucketEventSource, +BitbucketServerEventSource, +CalendarEventSource, +EmitterEventSource, +FileEventSource, +GenericEventSource, +GerritEventSource, +GithubEventSource, +GitlabEventSource, +HDFSEventSource, +KafkaEventSource, +MQTTEventSource, +NATSEventsSource, +NSQEventSource, +PubSubEventSource, +PulsarEventSource, +RedisEventSource, +RedisStreamEventSource, +SFTPEventSource, +SNSEventSource, +SQSEventSource, +SlackEventSource, +WebhookEventSource) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +expression
string +
+ +
+ +

+ +EventSourceSpec +

+ +

+ +(Appears on: +EventSource) +

+ +

+ +

+ +EventSourceSpec refers to specification of event-source resource +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +eventBusName
string +
+ +

+ +EventBusName references an EventBus name. By default the value is “default”. +

+ +
+ +template
+ Template +
+ +(Optional) +

+ +Template is the pod specification for the event source +

+ +
+ +service
+Service +
+ +(Optional) +

+ +Service is the specification of the service that exposes the event source +

+ +
+ +minio
+map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.S3Artifact + +
+ +

+ +Minio event sources +

+ +
+ +calendar
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.CalendarEventSource + +
+ +

+ +Calendar event sources +

+ +
+ +file
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.FileEventSource + +
+ +

+ +File event sources +

+ +
+ +resource
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.ResourceEventSource + +
+ +

+ +Resource event sources +

+ +
+ +webhook
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.WebhookEventSource + +
+ +

+ +Webhook event sources +

+ +
+ +amqp
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.AMQPEventSource + +
+ +

+ +AMQP event sources +

+ +
+ +kafka
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.KafkaEventSource + +
+ +

+ +Kafka event sources +

+ +
+ +mqtt
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.MQTTEventSource + +
+ +

+ +MQTT event sources +

+ +
+ +nats
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.NATSEventsSource + +
+ +

+ +NATS event sources +

+ +
+ +sns
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.SNSEventSource + +
+ +

+ +SNS event sources +

+ +
+ +sqs
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.SQSEventSource + +
+ +

+ +SQS event sources +

+ +
+ +pubSub
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.PubSubEventSource + +
+ +

+ +PubSub event sources +

+ +
+ +github
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.GithubEventSource + +
+ +

+ +Github event sources +

+ +
+ +gitlab
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.GitlabEventSource + +
+ +

+ +Gitlab event sources +

+ +
+ +hdfs
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.HDFSEventSource + +
+ +

+ +HDFS event sources +

+ +
+ +slack
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.SlackEventSource + +
+ +

+ +Slack event sources +

+ +
+ +storageGrid
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.StorageGridEventSource + +
+ +

+ +StorageGrid event sources +

+ +
+ +azureEventsHub
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.AzureEventsHubEventSource + +
+ +

+ +AzureEventsHub event sources +

+ +
+ +stripe
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.StripeEventSource + +
+ +

+ +Stripe event sources +

+ +
+ +emitter
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.EmitterEventSource + +
+ +

+ +Emitter event source +

+ +
+ +redis
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.RedisEventSource + +
+ +

+ +Redis event source +

+ +
+ +nsq
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.NSQEventSource + +
+ +

+ +NSQ event source +

+ +
+ +pulsar
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.PulsarEventSource + +
+ +

+ +Pulsar event source +

+ +
+ +generic
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.GenericEventSource + +
+ +

+ +Generic event source +

+ +
+ +replicas
int32 +
+ +

+ +Replicas is the event source deployment replicas +

+ +
+ +bitbucketserver
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.BitbucketServerEventSource + +
+ +

+ +Bitbucket Server event sources +

+ +
+ +bitbucket
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.BitbucketEventSource + +
+ +

+ +Bitbucket event sources +

+ +
+ +redisStream
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.RedisStreamEventSource + +
+ +

+ +Redis stream source +

+ +
+ +azureServiceBus
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.AzureServiceBusEventSource + +
+ +

+ +Azure Service Bus event source +

+ +
+ +azureQueueStorage
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.AzureQueueStorageEventSource + +
+ +

+ +AzureQueueStorage event source +

+ +
+ +sftp
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.SFTPEventSource + +
+ +

+ +SFTP event sources +

+ +
+ +gerrit
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.GerritEventSource + +
+ +

+ +Gerrit event source +

+ +
+ +

+ +EventSourceStatus +

+ +

+ +(Appears on: +EventSource) +

+ +

+ +

+ +EventSourceStatus holds the status of the event-source resource +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +Status
+Status +
+ +

+ +(Members of Status are embedded into this type.) +

+ +
+ +

+ +EventSourceType (string alias) +

+ +

+ +

+ +

+ +EventSourceType is the type of event source +

+ +

+ +

+ +ExprFilter +

+ +

+ +(Appears on: +EventDependencyFilter) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +expr
string +
+ +

+ +Expr refers to the expression that determines the outcome of the filter. +

+ +
+ +fields
+ \[\]PayloadField + +
+ +

+ +Fields refers to set of keys that refer to the paths within event +payload. +

+ +
+ +
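+A minimal sketch of an expr filter inside a Sensor dependency, assuming
+the dependency filter lists these under an exprs field; the expression,
+field name, and path are placeholders:
+
+filters:
+  exprs:
+    - expr: action == "opened" || action == "reopened"  # hypothetical expression
+      fields:
+        - name: action                         # key the expression refers to
+          path: body.action                    # gjson path into the event payload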

+ +FileArtifact +

+ +

+ +(Appears on: +ArtifactLocation) +

+ +

+ +

+ +FileArtifact contains information about an artifact in a filesystem +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +path
string +
+ +
+ +

+ +FileEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +FileEventSource describes an event-source for file related events. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +eventType
string +
+ +

+ +Type of file operations to watch Refer +https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go +for more information +

+ +
+ +watchPathConfig
+ WatchPathConfig + +
+ +

+ +WatchPathConfig contains configuration about the file path to watch +

+ +
+ +polling
bool +
+ +

+ +Use polling instead of inotify +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +
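+A minimal sketch of a file event-source entry, assuming the EventSourceSpec
+key is file and that WatchPathConfig exposes directory and path (defined
+elsewhere in this reference); the paths are placeholders:
+
+spec:
+  file:
+    example:
+      eventType: CREATE                        # an fsnotify operation
+      polling: false                           # use inotify rather than polling
+      watchPathConfig:
+        directory: /data/                      # hypothetical directory to watch
+        path: input.txt                        # hypothetical file name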

+ +GenericEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +GenericEventSource refers to a generic event source. It can be used to +implement a custom event source. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +url
string +
+ +

+ +URL of the gRPC server that implements the event source. +

+ +
+ +config
string +
+ +

+ +Config is the event source configuration +

+ +
+ +insecure
bool +
+ +

+ +Insecure determines the type of connection; when true, a plain +(non-TLS) connection is used. +

+ +
+ +jsonBody
bool +
+ +(Optional) +

+ +JSONBody specifies that all event body payload coming from this source +will be JSON +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +authSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +AuthSecret holds a secret selector that contains a bearer token for +authentication +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +
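+A minimal sketch of a generic event-source entry (spec key generic, as
+listed in EventSourceSpec); the server address, secret name, and config
+body are placeholders:
+
+spec:
+  generic:
+    example:
+      url: generic-server.argo-events.svc:8080  # hypothetical gRPC server
+      insecure: true                           # plain connection to the server
+      jsonBody: true
+      config: |-                               # opaque config handed to the server
+        key: value
+      authSecret:
+        name: generic-auth                     # hypothetical secret with a bearer token
+        key: token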

+ +GerritEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +GerritEventSource refers to event-source related to gerrit events +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +webhook
+ WebhookContext + +
+ +

+ +Webhook holds configuration to run a http server +

+ +
+ +hookName
string +
+ +

+ +HookName is the name of the webhook +

+ +
+ +events
\[\]string +
+ +

+ +Events are the Gerrit events to listen to. Refer +https://gerrit-review.googlesource.com/Documentation/cmd-stream-events.html#events +

+ +
+ +auth
+BasicAuth +
+ +(Optional) +

+ +Auth hosts secret selectors for username and password +

+ +
+ +gerritBaseURL
string +
+ +

+ +GerritBaseURL is the base URL for API requests to a custom endpoint +

+ +
+ +deleteHookOnFinish
bool +
+ +(Optional) +

+ +DeleteHookOnFinish determines whether to delete the Gerrit hook for the +project once the event source is stopped. +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +projects
\[\]string +
+ +

+ +List of project namespace paths like “whynowy/test”. +

+ +
+ +sslVerify
bool +
+ +(Optional) +

+ +SslVerify to enable ssl verification +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +
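+A minimal sketch of a gerrit event-source entry; it assumes WebhookContext
+exposes endpoint and port (defined elsewhere in this reference), and the
+hook name, URL, and project are placeholders:
+
+spec:
+  gerrit:
+    example:
+      hookName: gerrit-webhook                 # hypothetical hook name
+      events:
+        - patchset-created
+      projects:
+        - "whynowy/test"
+      gerritBaseURL: http://gerrit.example.com:8080  # hypothetical endpoint
+      webhook:
+        endpoint: /example                     # assumed WebhookContext fields
+        port: "12000"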

+ +GitArtifact +

+ +

+ +(Appears on: +ArtifactLocation) +

+ +

+ +

+ +GitArtifact contains information about an artifact stored in git +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +url
string +
+ +

+ +Git URL +

+ +
+ +cloneDirectory
string +
+ +

+ +Directory to clone the repository into. We clone the complete repository +because GitArtifact is not limited to any specific Git service provider, +so no provider-specific git client is used. +

+ +
+ +creds
+GitCreds +
+ +(Optional) +

+ +Creds contain reference to git username and password +

+ +
+ +sshKeySecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +SSHKeySecret refers to the secret that contains SSH key +

+ +
+ +filePath
string +
+ +

+ +Path to file that contains trigger resource definition +

+ +
+ +branch
string +
+ +(Optional) +

+ +Branch to use to pull trigger resource +

+ +
+ +tag
string +
+ +(Optional) +

+ +Tag to use to pull trigger resource +

+ +
+ +ref
string +
+ +(Optional) +

+ +Ref to use to pull trigger resource. Will result in a shallow clone and +fetch. +

+ +
+ +remote
+ GitRemoteConfig + +
+ +(Optional) +

+ +Remote to manage set of tracked repositories. Defaults to “origin”. +Refer +https://git-scm.com/docs/git-remote +

+ +
+ +insecureIgnoreHostKey
bool +
+ +(Optional) +

+ +Whether to ignore host key +

+ +
+ +
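+A minimal sketch of a git artifact used as a trigger source; the repository
+URL, clone directory, file path, and secret name are placeholders:
+
+source:
+  git:
+    url: https://github.com/argoproj/argo-events.git  # hypothetical repository
+    cloneDirectory: /git/argoproj              # hypothetical clone location
+    filePath: path/to/trigger.yaml             # hypothetical trigger definition
+    branch: master
+    creds:
+      username:
+        name: git-creds                        # hypothetical secret
+        key: username
+      password:
+        name: git-creds
+        key: password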

+ +GitCreds +

+ +

+ +(Appears on: +GitArtifact) +

+ +

+ +

+ +GitCreds contain reference to git username and password +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +username
+ +Kubernetes core/v1.SecretKeySelector +
+ +
+ +password
+ +Kubernetes core/v1.SecretKeySelector +
+ +
+ +

+ +GitRemoteConfig +

+ +

+ +(Appears on: +GitArtifact) +

+ +

+ +

+ +GitRemoteConfig contains the configuration of a Git remote +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +name
string +
+ +

+ +Name of the remote to fetch from. +

+ +
+ +urls
\[\]string +
+ +

+ +URLs of the remote repository; must be non-empty. Fetch will always use +the first URL, while push will use all of them. +

+ +
+ +

+ +GithubAppCreds +

+ +

+ +(Appears on: +GithubEventSource) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +privateKey
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +PrivateKey refers to a K8s secret containing the GitHub app private key +

+ +
+ +appID
int64 +
+ +

+ +AppID refers to the GitHub App ID for the application you created +

+ +
+ +installationID
int64 +
+ +

+ +InstallationID refers to the Installation ID of the GitHub app you +created and installed +

+ +
+ +

+ +GithubEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +GithubEventSource refers to event-source for github related events +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +id
int64 +
+ +(Optional) +

+ +Id is the webhook’s id. Deprecated: this is not used at all and will be +removed in v1.6 +

+ +
+ +webhook
+ WebhookContext + +
+ +

+ +Webhook refers to the configuration required to run a http server +

+ +
+ +owner
string +
+ +(Optional) +

+ +DeprecatedOwner refers to the GitHub owner name, e.g. argoproj. +Deprecated: use Repositories instead; will be unsupported in v1.6 +

+ +
+ +repository
string +
+ +(Optional) +

+ +DeprecatedRepository refers to the GitHub repo name, e.g. argo-events. +Deprecated: use Repositories instead; will be unsupported in v1.6 +

+ +
+ +events
\[\]string +
+ +

+ +Events refer to Github events to which the event source will subscribe +

+ +
+ +apiToken
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +APIToken refers to a K8s secret containing github api token +

+ +
+ +webhookSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +WebhookSecret refers to K8s secret containing GitHub webhook secret +https://developer.github.com/webhooks/securing/ +

+ +
+ +insecure
bool +
+ +

+ +Insecure tls verification +

+ +
+ +active
bool +
+ +(Optional) +

+ +Active refers to status of the webhook for event deliveries. +https://developer.github.com/webhooks/creating/#active +

+ +
+ +contentType
string +
+ +

+ +ContentType of the event delivery +

+ +
+ +githubBaseURL
string +
+ +(Optional) +

+ +GitHub base URL (for GitHub Enterprise) +

+ +
+ +githubUploadURL
string +
+ +(Optional) +

+ +GitHub upload URL (for GitHub Enterprise) +

+ +
+ +deleteHookOnFinish
bool +
+ +(Optional) +

+ +DeleteHookOnFinish determines whether to delete the GitHub hook for the +repository once the event source is stopped. +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +repositories
+ \[\]OwnedRepositories + +
+ +

+ +Repositories holds the repository information, using the repo owner as +the key and a list of repo names as the value. Not required if +Organizations is set. +

+ +
+ +organizations
\[\]string +
+ +

+ +Organizations holds the names of organizations (used for organization +level webhooks). Not required if Repositories is set. +

+ +
+ +githubApp
+ GithubAppCreds + +
+ +(Optional) +

+ +GitHubApp holds the GitHub app credentials +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +
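+A minimal sketch of a github event-source entry (spec key github, as listed
+in EventSourceSpec); it assumes WebhookContext exposes endpoint and port,
+and the owner, repo, and secret names are placeholders:
+
+spec:
+  github:
+    example:
+      repositories:
+        - owner: argoproj                      # hypothetical owner
+          names:
+            - argo-events                      # hypothetical repo
+      events:
+        - push
+      webhook:
+        endpoint: /push                        # assumed WebhookContext fields
+        port: "12000"
+      apiToken:
+        name: github-access                    # hypothetical secret
+        key: token
+      webhookSecret:
+        name: github-access
+        key: secret
+      contentType: json
+      active: true
+      insecure: false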

+ +GitlabEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +GitlabEventSource refers to event-source related to Gitlab events +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +webhook
+ WebhookContext + +
+ +

+ +Webhook holds configuration to run a http server +

+ +
+ +projectID
string +
+ +(Optional) +

+ +DeprecatedProjectID is the id of the project for which the integration +needs to be set up. Deprecated: use Projects instead; will be unsupported +in v1.7 +

+ +
+ +events
\[\]string +
+ +

+ +Events are the gitlab events to listen to. Refer +https://github.com/xanzy/go-gitlab/blob/bf34eca5d13a9f4c3f501d8a97b8ac226d55e4d9/projects.go#L794. +

+ +
+ +accessToken
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +AccessToken references a K8s secret which holds the gitlab api access +information +

+ +
+ +enableSSLVerification
bool +
+ +(Optional) +

+ +EnableSSLVerification to enable ssl verification +

+ +
+ +gitlabBaseURL
string +
+ +

+ +GitlabBaseURL is the base URL for API requests to a custom endpoint +

+ +
+ +deleteHookOnFinish
bool +
+ +(Optional) +

+ +DeleteHookOnFinish determines whether to delete the GitLab hook for the +project once the event source is stopped. +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +projects
\[\]string +
+ +(Optional) +

+ +List of project IDs or project namespace paths like “whynowy/test”. +Projects and groups cannot be empty at the same time. +

+ +
+ +secretToken
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +SecretToken references a K8s secret which holds the secret token used by +the webhook config +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +groups
\[\]string +
+ +(Optional) +

+ +List of group IDs or group names like “test”. Group-level hooks are +available in GitLab Premium and Ultimate. +

+ +
+ +
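+A minimal sketch of a gitlab event-source entry (spec key gitlab); the
+project ID and secret names are placeholders, and the WebhookContext
+fields are assumed as above:
+
+spec:
+  gitlab:
+    example:
+      projects:
+        - "1234"                               # hypothetical project ID
+      events:
+        - PushEvents
+      accessToken:
+        name: gitlab-access                    # hypothetical secret
+        key: token
+      gitlabBaseURL: https://gitlab.com
+      enableSSLVerification: true
+      webhook:
+        endpoint: /push
+        port: "12000"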

+ +HDFSEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +HDFSEventSource refers to event-source for HDFS related events +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +WatchPathConfig
+ WatchPathConfig + +
+ +

+ +(Members of WatchPathConfig are embedded into this type.) +

+ +
+ +type
string +
+ +

+ +Type of file operations to watch +

+ +
+ +checkInterval
string +
+ +

+ +CheckInterval is a string that describes an interval duration to check +the directory state, e.g. 1s, 30m, 2h… (defaults to 1m) +

+ +
+ +addresses
\[\]string +
+ +
+ +hdfsUser
string +
+ +

+ +HDFSUser is the user to access HDFS file system. It is ignored if either +ccache or keytab is used. +

+ +
+ +krbCCacheSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +KrbCCacheSecret is the secret selector for the Kerberos ccache. Either +ccache or keytab can be set to use Kerberos. +

+ +
+ +krbKeytabSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +KrbKeytabSecret is the secret selector for the Kerberos keytab. Either +ccache or keytab can be set to use Kerberos. +

+ +
+ +krbUsername
string +
+ +

+ +KrbUsername is the Kerberos username used with the Kerberos keytab. It +must be set if keytab is used. +

+ +
+ +krbRealm
string +
+ +

+ +KrbRealm is the Kerberos realm used with the Kerberos keytab. It must be +set if keytab is used. +

+ +
+ +krbConfigConfigMap
+ +Kubernetes core/v1.ConfigMapKeySelector +
+ +

+ +KrbConfig is the configmap selector for the Kerberos config (as a +string). It must be set if either ccache or keytab is used. +

+ +
+ +krbServicePrincipalName
string +
+ +

+ +KrbServicePrincipalName is the principal name of the Kerberos service. It +must be set if either ccache or keytab is used. +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +

+ +HTTPTrigger +

+ +

+ +(Appears on: +TriggerTemplate) +

+ +

+ +

+ +HTTPTrigger is the trigger for the HTTP request +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +url
string +
+ +

+ +URL refers to the URL to send HTTP request to. +

+ +
+ +payload
+ \[\]TriggerParameter + +
+ +
+ +tls
+TLSConfig +
+ +(Optional) +

+ +TLS configuration for the HTTP client. +

+ +
+ +method
string +
+ +(Optional) +

+ +Method refers to the type of the HTTP request. Refer +https://golang.org/src/net/http/method.go +for more info. Default value is POST. +

+ +
+ +parameters
+ \[\]TriggerParameter + +
+ +

+ +Parameters is the list of key-value pairs extracted from the event’s +payload that are applied to the HTTP trigger resource. +

+ +
+ +timeout
int64 +
+ +(Optional) +

+ +Timeout refers to the HTTP request timeout in seconds. Default value is +60 seconds. +

+ +
+ +basicAuth
+ BasicAuth +
+ +(Optional) +

+ +BasicAuth configuration for the http request. +

+ +
+ +headers
map\[string\]string +
+ +(Optional) +

+ +Headers for the HTTP request. +

+ +
+ +secureHeaders
+ +\[\]\*github.com/argoproj/argo-events/pkg/apis/events/v1alpha1.SecureHeader + +
+ +(Optional) +

+ +Secure Headers stored in Kubernetes Secrets for the HTTP requests. +

+ +
+ +
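+A minimal sketch of an HTTP trigger inside a Sensor, assuming the
+TriggerTemplate field is http and that TriggerParameter takes a src
+(dependencyName, dataKey) and dest (defined elsewhere in this reference);
+the URL and dependency name are placeholders:
+
+trigger:
+  template:
+    name: http-trigger                         # hypothetical trigger name
+    http:
+      url: http://api.example.com/notify       # hypothetical target URL
+      method: POST
+      timeout: 30                              # seconds
+      headers:
+        Content-Type: application/json
+      payload:
+        - src:
+            dependencyName: example-dep        # hypothetical dependency
+            dataKey: body
+          dest: message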

+ +Int64OrString +

+ +

+ +(Appears on: +Backoff) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +type
Type + +
+ +
+ +int64Val
int64 +
+ +
+ +strVal
string +
+ +
+ +

+ +JSONType (string alias) +

+ +

+ +

+ +(Appears on: +DataFilter) +

+ +

+ +

+ +JSONType contains the supported JSON types for data filtering +

+ +

+ +

+ +JetStreamBus +

+ +

+ +(Appears on: +EventBusSpec) +

+ +

+ +

+ +JetStreamBus holds the JetStream EventBus information +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +version
string +
+ +

+ +JetStream version, such as “2.7.3” +

+ +
+ +replicas
int32 +
+ +

+ +JetStream StatefulSet size +

+ +
+ +containerTemplate
+ ContainerTemplate + +
+ +(Optional) +

+ +ContainerTemplate contains customized spec for Nats JetStream container +

+ +
+ +reloaderContainerTemplate
+ ContainerTemplate + +
+ +(Optional) +

+ +ReloaderContainerTemplate contains customized spec for config reloader +container +

+ +
+ +metricsContainerTemplate
+ ContainerTemplate + +
+ +(Optional) +

+ +MetricsContainerTemplate contains customized spec for metrics container +

+ +
+ +persistence
+ PersistenceStrategy + +
+ +(Optional) +
+ +metadata
+ Metadata +
+ +

+ +Metadata sets the pod’s metadata, i.e. annotations and labels +

+ +
+ +nodeSelector
map\[string\]string +
+ +(Optional) +

+ +NodeSelector is a selector which must be true for the pod to fit on a +node. Selector which must match a node’s labels for the pod to be +scheduled on that node. More info: +https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +

+ +
+ +tolerations
+ +\[\]Kubernetes core/v1.Toleration +
+ +(Optional) +

+ +If specified, the pod’s tolerations. +

+ +
+ +securityContext
+ +Kubernetes core/v1.PodSecurityContext +
+ +(Optional) +

+ +SecurityContext holds pod-level security attributes and common container +settings. Optional: Defaults to empty. See type description for default +values of each field. +

+ +
+ +imagePullSecrets
+ +\[\]Kubernetes core/v1.LocalObjectReference +
+ +(Optional) +

+ +ImagePullSecrets is an optional list of references to secrets in the +same namespace to use for pulling any of the images used by this +PodSpec. If specified, these secrets will be passed to individual puller +implementations for them to use. For example, in the case of docker, +only DockerConfig type secrets are honored. More info: +https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod +

+ +
+ +priorityClassName
string +
+ +(Optional) +

+ +If specified, indicates the pod’s priority. “system-node-critical” +and “system-cluster-critical” are two special keywords which indicate +the highest priorities with the former being the highest priority. Any +other name must be defined by creating a PriorityClass object with that +name. If not specified, the pod priority will be default or zero if +there is no default. More info: +https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +

+ +
+ +priority
int32 +
+ +(Optional) +

+ +The priority value. Various system components use this field to find the +priority of the pod. When Priority Admission Controller is +enabled, it prevents users from setting this field. The admission +controller populates this field from PriorityClassName. The higher the +value, the higher the priority. More info: +https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +

+ +
+ +affinity
+ +Kubernetes core/v1.Affinity +
+ +(Optional) +

+ +The pod’s scheduling constraints More info: +https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ +

+ +
+ +serviceAccountName
string +
+ +(Optional) +

+ +ServiceAccountName to apply to the StatefulSet +

+ +
+ +settings
string +
+ +(Optional) +

+ +JetStream configuration, if not specified, global settings in +controller-config will be used. See +https://docs.nats.io/running-a-nats-service/configuration#jetstream. +Only configure “max_memory_store” or “max_file_store”, do not set +“store_dir” as it has been hardcoded. +

+ +
+ +startArgs
\[\]string +
+ +(Optional) +

+ +Optional arguments to start nats-server. For example, “-D” to enable +debugging output, “-DV” to enable debugging and tracing. Check +https://docs.nats.io/ for all the +available arguments. +

+ +
+ +streamConfig
string +
+ +(Optional) +

+ +Optional configuration for the streams to be created in this JetStream +service, if specified, it will be merged with the default configuration +in controller-config. It accepts a YAML format configuration, available +fields include, “maxBytes”, “maxMsgs”, “maxAge” (e.g. 72h), “replicas” +(1, 3, 5), “duplicates” (e.g. 5m), “retention” (e.g. 0: Limits +(default), 1: Interest, 2: WorkQueue), “Discard” (e.g. 0: DiscardOld +(default), 1: DiscardNew). +

+ +
+ +maxPayload
string +
+ +(Optional) +

+ +Maximum number of bytes in a message payload, 0 means unlimited. +Defaults to 1MB +

+ +
+ +
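+A minimal sketch of a JetStream EventBus; the storage class is a
+placeholder, and the streamConfig keys are among those listed above:
+
+apiVersion: argoproj.io/v1alpha1
+kind: EventBus
+metadata:
+  name: default
+spec:
+  jetstream:
+    version: "2.7.3"
+    replicas: 3
+    persistence:
+      storageClassName: standard               # hypothetical storage class
+      accessMode: ReadWriteOnce
+      volumeSize: 10Gi
+    streamConfig: |
+      maxAge: 72h
+      replicas: 3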

+ +JetStreamConfig +

+ +

+ +(Appears on: +BusConfig, +EventBusSpec) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +url
string +
+ +

+ +JetStream (Nats) URL +

+ +
+ +accessSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +Secret for auth +

+ +
+ +streamConfig
string +
+ +(Optional) +
+ +

+ +K8SResource +

+ +

+ +(Appears on: +ArtifactLocation) +

+ +

+ +

+ +K8SResource represents arbitrary structured data. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +value
\[\]byte +
+ +
+ +

+ +K8SResourcePolicy +

+ +

+ +(Appears on: +TriggerPolicy) +

+ +

+ +

+ +K8SResourcePolicy refers to the policy used to check the state of K8s +based triggers using labels +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +labels
map\[string\]string +
+ +

+ +Labels required to identify whether a resource is in success state +

+ +
+ +backoff
+Backoff +
+ +

+ +Backoff before checking resource state +

+ +
+ +errorOnBackoffTimeout
bool +
+ +

+ +ErrorOnBackoffTimeout determines whether the sensor should transition to +an error state if the trigger policy is unable to determine the state of +the resource +

+ +
+ +

+ +KafkaBus +

+ +

+ +(Appears on: +BusConfig, +EventBusSpec) +

+ +

+ +

+ +KafkaBus holds the KafkaBus EventBus information +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +url
string +
+ +

+ +URL to kafka cluster, multiple URLs separated by comma +

+ +
+ +topic
string +
+ +(Optional) +

+ +Topic name, defaults to {namespace_name}-{eventbus_name} +

+ +
+ +version
string +
+ +(Optional) +

+ +Kafka version, sarama defaults to the oldest supported stable version +

+ +
+ +tls
+TLSConfig +
+ +(Optional) +

+ +TLS configuration for the kafka client. +

+ +
+ +sasl
+SASLConfig +
+ +(Optional) +

+ +SASL configuration for the kafka client +

+ +
+ +consumerGroup
+ KafkaConsumerGroup + +
+ +(Optional) +

+ +Consumer group for kafka client +

+ +
+ +

+ +KafkaConsumerGroup +

+ +

+ +(Appears on: +KafkaBus, +KafkaEventSource) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +groupName
string +
+ +

+ +The name for the consumer group to use +

+ +
+ +oldest
bool +
+ +(Optional) +

+ +When starting up a new group, whether to start from the oldest event +(true) or the newest event (false); defaults to false +

+ +
+ +rebalanceStrategy
string +
+ +(Optional) +

+ +Rebalance strategy can be one of: sticky, roundrobin, range. Range is +the default. +

+ +
+ +

+ +KafkaEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +KafkaEventSource refers to event-source for Kafka related events +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +url
string +
+ +

+ +URL to kafka cluster, multiple URLs separated by comma +

+ +
+ +partition
string +
+ +(Optional) +

+ +Partition name +

+ +
+ +topic
string +
+ +

+ +Topic name +

+ +
+ +connectionBackoff
+ Backoff +
+ +

+ +Backoff holds parameters applied to connection. +

+ +
+ +tls
+TLSConfig +
+ +(Optional) +

+ +TLS configuration for the kafka client. +

+ +
+ +jsonBody
bool +
+ +(Optional) +

+ +JSONBody specifies that all event body payload coming from this source +will be JSON +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +consumerGroup
+ KafkaConsumerGroup + +
+ +(Optional) +

+ +Consumer group for kafka client +

+ +
+ +limitEventsPerSecond
int64 +
+ +(Optional) +

+ +Sets a limit on how many events get read from kafka per second. +

+ +
+ +version
string +
+ +(Optional) +

+ +Specifies the kafka version being connected to; this enables certain +features in sarama. Defaults to 1.0.0 +

+ +
+ +sasl
+SASLConfig +
+ +(Optional) +

+ +SASL configuration for the kafka client +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +config
string +
+ +(Optional) +

+ +YAML-format Sarama config for the Kafka connection. It follows the +struct of sarama.Config. See +https://github.com/IBM/sarama/blob/main/config.go +e.g. +

+ +

+ +consumer:
+  fetch:
+    min: 1
+net:
+  MaxOpenRequests: 5 +

+ +
+ +
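+A minimal sketch of a kafka event-source entry, assuming the
+EventSourceSpec key is kafka; the broker address, topic, and group name
+are placeholders, and config shows the indented Sarama YAML from the
+description above:
+
+spec:
+  kafka:
+    example:
+      url: kafka.argo-events.svc:9092          # hypothetical broker
+      topic: topic-2                           # hypothetical topic
+      jsonBody: true
+      consumerGroup:
+        groupName: group-1                     # hypothetical consumer group
+        oldest: false
+        rebalanceStrategy: range
+      config: |
+        consumer:
+          fetch:
+            min: 1
+        net:
+          MaxOpenRequests: 5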

+ +KafkaTrigger +

+ +

+ +(Appears on: +TriggerTemplate) +

+ +

+ +

+ +KafkaTrigger refers to the specification of the Kafka trigger. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +url
string +
+ +

+ +URL of the Kafka broker, multiple URLs separated by comma. +

+ +
+ +topic
string +
+ +

+ +Name of the topic. More info at +https://kafka.apache.org/documentation/#intro_topics +

+ +
+ +partition
int32 +
+ +(Optional) +

+ +DEPRECATED +

+ +
+ +parameters
+ \[\]TriggerParameter + +
+ +

+ +Parameters is the list of parameters that is applied to the resolved +Kafka trigger object. +

+ +
+ +requiredAcks
int32 +
+ +

+ +RequiredAcks is used in the producer to tell the broker how many replica +acknowledgements are required. Defaults to 1 (only wait for the leader to +ack). +

+ +
+ +compress
bool +
+ +(Optional) +

+ +Compress determines whether to compress message or not. Defaults to +false. If set to true, compresses message using snappy compression. +

+ +
+ +flushFrequency
int32 +
+ +(Optional) +

+ +FlushFrequency refers to the frequency in milliseconds to flush batches. +Defaults to 500 milliseconds. +

+ +
+ +tls
+TLSConfig +
+ +(Optional) +

+ +TLS configuration for the Kafka producer. +

+ +
+ +payload
+ \[\]TriggerParameter + +
+ +

+ +Payload is the list of key-value pairs extracted from an event payload to +construct the request payload. +

+ +
+ +partitioningKey
string +
+ +

+ +The partitioning key for the messages put on the Kafka topic. +

+ +
+ +version
string +
+ +(Optional) +

+ +Specifies the kafka version being connected to; this enables certain +features in sarama. Defaults to 1.0.0 +

+ +
+ +sasl
+SASLConfig +
+ +(Optional) +

+ +SASL configuration for the kafka client +

+ +
+ +schemaRegistry
+ +SchemaRegistryConfig +
+ +(Optional) +

+ +Schema Registry configuration to produce messages in avro format +

+ +
+ +
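+A minimal sketch of a Kafka trigger, assuming the TriggerTemplate field is
+kafka and the same TriggerParameter shape as above; broker and topic are
+placeholders:
+
+trigger:
+  template:
+    name: kafka-trigger                        # hypothetical trigger name
+    kafka:
+      url: kafka.argo-events.svc:9092          # hypothetical broker
+      topic: notifications                     # hypothetical topic
+      partitioningKey: message
+      requiredAcks: 1
+      flushFrequency: 500                      # milliseconds
+      payload:
+        - src:
+            dependencyName: example-dep        # hypothetical dependency
+            dataKey: body
+          dest: message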

+ +KubernetesResourceOperation (string alias) +

+ +

+ +

+ +(Appears on: +StandardK8STrigger) +

+ +

+ +

+ +KubernetesResourceOperation refers to the type of operation performed on +the K8s resource +

+ +

+ +

+ +LogTrigger +

+ +

+ +(Appears on: +TriggerTemplate) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +intervalSeconds
uint64 +
+ +(Optional) +

+ +Only print messages every interval. Useful to prevent logging too much +data for busy events. +

+ +
+ +

+ +LogicalOperator (string alias) +

+ +

+ +

+ +(Appears on: +EventDependency, +EventDependencyFilter) +

+ +

+ +

+ +

+ +MQTTEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +MQTTEventSource refers to event-source for MQTT related events +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +url
string +
+ +

+ +URL to connect to broker +

+ +
+ +topic
string +
+ +

+ +Topic name +

+ +
+ +clientId
string +
+ +

+ +ClientID is the id of the client +

+ +
+ +connectionBackoff
+ Backoff +
+ +

+ +ConnectionBackoff holds backoff applied to connection. +

+ +
+ +jsonBody
bool +
+ +(Optional) +

+ +JSONBody specifies that all event body payload coming from this source +will be JSON +

+ +
+ +tls
+TLSConfig +
+ +(Optional) +

+ +TLS configuration for the mqtt client. +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +auth
+BasicAuth +
+ +(Optional) +

+ +Auth hosts secret selectors for username and password +

+ +
+ +

+ +Metadata +

+ +

+ +(Appears on: +JetStreamBus, +NativeStrategy, +Template) +

+ +

+ +

+ +Metadata holds the annotations and labels of an event source pod +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +annotations
map\[string\]string +
+ +
+ +labels
map\[string\]string +
+ +
+ +

+ +NATSAuth +

+ +

+ +(Appears on: +NATSEventsSource) +

+ +

+ +

+ +NATSAuth refers to the auth info for NATS EventSource +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +basic
+BasicAuth +
+ +(Optional) +

+ +Basic auth with username and password +

+ +
+ +token
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +Token used to connect +

+ +
+ +nkey
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +NKey used to connect +

+ +
+ +credential
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +credential used to connect +

+ +
+ +

+ +NATSBus +

+ +

+ +(Appears on: +EventBusSpec) +

+ +

+ +

+ +NATSBus holds the NATS eventbus information +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +native
+ NativeStrategy + +
+ +

+ +Native means to bring up a native NATS service +

+ +
+ +exotic
+ NATSConfig +
+ +

+ +Exotic holds an exotic NATS config +

+ +
+ +

+ +NATSConfig +

+ +

+ +(Appears on: +BusConfig, +NATSBus) +

+ +

+ +

+ +NATSConfig holds the config of NATS +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +url
string +
+ +

+ +NATS streaming url +

+ +
+ +clusterID
string +
+ +

+ +Cluster ID for nats streaming +

+ +
+ +auth
+ AuthStrategy +
+ +(Optional) +

+ +Auth strategy, default to AuthStrategyNone +

+ +
+ +accessSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +Secret for auth +

+ +
+ +

+ +NATSEventsSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +NATSEventsSource refers to event-source for NATS related events +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +url
string +
+ +

+ +URL to connect to NATS cluster +

+ +
+ +subject
string +
+ +

+ +Subject holds the name of the subject onto which messages are published +

+ +
+ +connectionBackoff
+ Backoff +
+ +

+ +ConnectionBackoff holds backoff applied to connection. +

+ +
+ +jsonBody
bool +
+ +(Optional) +

+ +JSONBody specifies that all event body payload coming from this source +will be JSON +

+ +
+ +tls
+TLSConfig +
+ +(Optional) +

+ +TLS configuration for the nats client. +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +auth
+NATSAuth +
+ +(Optional) +

+ +Auth information +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +queue
string +
+ +(Optional) +

+ +Queue is the name of the queue group to subscribe as, if specified. Uses +QueueSubscribe logic to subscribe as a queue group. If the queue is +empty, the default Subscribe logic is used. +

+ +
+ +
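+A minimal sketch of a NATS event-source entry, assuming the
+EventSourceSpec key is nats; the cluster URL, subject, queue, and secret
+are placeholders:
+
+spec:
+  nats:
+    example:
+      url: nats://nats.argo-events.svc:4222    # hypothetical cluster URL
+      subject: foo                             # hypothetical subject
+      queue: my-queue                          # subscribe as a queue group
+      jsonBody: true
+      auth:
+        token:
+          name: nats-auth                      # hypothetical secret
+          key: token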

+ +NATSTrigger +

+ +

+ +(Appears on: +TriggerTemplate) +

+ +

+ +

+ +NATSTrigger refers to the specification of the NATS trigger. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +url
string +
+ +

+ +URL of the NATS cluster. +

+ +
+ +subject
string +
+ +

+ +Name of the subject to put message on. +

+ +
+ +payload
+ \[\]TriggerParameter + +
+ +
+ +parameters
+ \[\]TriggerParameter + +
+ +
+ +tls
+TLSConfig +
+ +(Optional) +

+ +TLS configuration for the NATS producer. +

+ +
+ +

+ +NSQEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +NSQEventSource describes the event source for NSQ PubSub. More info at +https://godoc.org/github.com/nsqio/go-nsq +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +hostAddress
string +
+ +

+ +HostAddress is the address of the host for NSQ lookup +

+ +
+ +topic
string +
+ +

+ +Topic to subscribe to. +

+ +
+ +channel
string +
+ +

+ +Channel used for subscription +

+ +
+ +connectionBackoff
+ Backoff +
+ +(Optional) +

+ +Backoff holds parameters applied to connection. +

+ +
+ +jsonBody
bool +
+ +(Optional) +

+ +JSONBody specifies that all event body payload coming from this source +will be JSON +

+ +
+ +tls
+TLSConfig +
+ +(Optional) +

+ +TLS configuration for the nsq client. +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +

+ +NativeStrategy +

+ +

+ +(Appears on: +NATSBus) +

+ +

+ +

+ +NativeStrategy indicates to install a native NATS service +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +replicas
int32 +
+ +

+ +Size is the NATS StatefulSet size +

+ +
+ +auth
+ AuthStrategy +
+ +
+ +persistence
+ PersistenceStrategy + +
+ +(Optional) +
+ +containerTemplate
+ ContainerTemplate + +
+ +(Optional) +

+ +ContainerTemplate contains customized spec for NATS container +

+ +
+ +metricsContainerTemplate
+ ContainerTemplate + +
+ +(Optional) +

+ +MetricsContainerTemplate contains customized spec for metrics container +

+ +
+ +nodeSelector
map\[string\]string +
+ +(Optional) +

+ +NodeSelector is a selector which must be true for the pod to fit on a +node. Selector which must match a node’s labels for the pod to be +scheduled on that node. More info: +https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +

+ +
+ +tolerations
+ +\[\]Kubernetes core/v1.Toleration +
+ +(Optional) +

+ +If specified, the pod’s tolerations. +

+ +
+ +metadata
+ Metadata +
+ +

+ +Metadata sets the pod’s metadata, i.e. annotations and labels +

+ +
+ +securityContext
+ +Kubernetes core/v1.PodSecurityContext +
+ +(Optional) +

+ +SecurityContext holds pod-level security attributes and common container +settings. Optional: Defaults to empty. See type description for default +values of each field. +

+ +
+ +maxAge
string +
+ +(Optional) +

+ +Max Age of existing messages, i.e. “72h”, “4h35m” +

+ +
+ +imagePullSecrets
+ +\[\]Kubernetes core/v1.LocalObjectReference +
+ +(Optional) +

+ +ImagePullSecrets is an optional list of references to secrets in the +same namespace to use for pulling any of the images used by this +PodSpec. If specified, these secrets will be passed to individual puller +implementations for them to use. For example, in the case of docker, +only DockerConfig type secrets are honored. More info: +https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod +

+ +
+ +serviceAccountName
string +
+ +(Optional) +

+ +ServiceAccountName to apply to NATS StatefulSet +

+ +
+ +priorityClassName
string +
+ +(Optional) +

+ +If specified, indicates the EventSource pod’s priority. +“system-node-critical” and “system-cluster-critical” are two special +keywords which indicate the highest priorities with the former being the +highest priority. Any other name must be defined by creating a +PriorityClass object with that name. If not specified, the pod priority +will be default or zero if there is no default. More info: +https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +

+ +
+ +priority
int32 +
+ +(Optional) +

+ +The priority value. Various system components use this field to find the +priority of the EventSource pod. When Priority Admission Controller is +enabled, it prevents users from setting this field. The admission +controller populates this field from PriorityClassName. The higher the +value, the higher the priority. More info: +https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +

+ +
+ +affinity
+ +Kubernetes core/v1.Affinity +
+ +(Optional) +

+ +The pod’s scheduling constraints More info: +https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ +

+ +
+ +maxMsgs
uint64 +
+ +

+ +Maximum number of messages per channel, 0 means unlimited. Defaults to +1000000 +

+ +
+ +maxBytes
string +
+ +

+ +Total size of messages per channel, 0 means unlimited. Defaults to 1GB +

+ +
+ +maxSubs
uint64 +
+ +

+ +Maximum number of subscriptions per channel, 0 means unlimited. Defaults +to 1000 +

+ +
+ +maxPayload
string +
+ +

+ +Maximum number of bytes in a message payload, 0 means unlimited. +Defaults to 1MB +

+ +
+ +raftHeartbeatTimeout
string +
+ +

+ +Specifies the time in follower state without a leader before attempting +an election, i.e. “72h”, “4h35m”. Defaults to 2s +

+ +
+ +raftElectionTimeout
string +
+ +

+ +Specifies the time in candidate state without a leader before attempting +an election, i.e. “72h”, “4h35m”. Defaults to 2s +

+ +
+ +raftLeaseTimeout
string +
+ +

+ +Specifies how long a leader waits without being able to contact a quorum +of nodes before stepping down as leader, i.e. “72h”, “4h35m”. Defaults +to 1s +

+ +
+ +raftCommitTimeout
string +
+ +

+ +Specifies the time without an Apply() operation before sending a +heartbeat to ensure timely commit, i.e. “72h”, “4h35m”. Defaults to +100ms +

+ +
+ +

+ +OpenWhiskTrigger +

+ +

+ +(Appears on: +TriggerTemplate) +

+ +

+ +

+ +OpenWhiskTrigger refers to the specification of the OpenWhisk trigger. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +host
string +
+ +

+ +Host URL of the OpenWhisk. +

+ +
+ +version
string +
+ +(Optional) +

+ +Version for the API. Defaults to v1. +

+ +
+ +namespace
string +
+ +

+ +Namespace for the action. Defaults to “\_”. +

+ +
+ +authToken
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +AuthToken for authentication. +

+ +
+ +actionName
string +
+ +

+ +Name of the action/function. +

+ +
+ +payload
+ \[\]TriggerParameter + +
+ +

+ +Payload is the list of key-value pairs extracted from an event payload to +construct the request payload. +

+ +
+ +parameters
+ \[\]TriggerParameter + +
+ +(Optional) +

+ +Parameters is the list of key-value pairs extracted from the event’s +payload that are applied to the trigger resource. +

+ +
+ +

+ +OwnedRepositories +

+ +

+ +(Appears on: +GithubEventSource) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +owner
string +
+ +

+ +Organization or user name +

+ +
+ +names
\[\]string +
+ +

+ +Repository names +

+ +
+ +

+ +PayloadField +

+ +

+ +(Appears on: +ExprFilter) +

+ +

+ +

+ +PayloadField binds a value at path within the event payload against a +name. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +path
string +
+ +

+ +Path is the JSONPath of the event’s (JSON decoded) data key. Path is a +series of keys separated by a dot. A key may contain wildcard characters +‘\*’ and ‘?’. To access an array value, use the index as the key. The dot +and wildcard characters can be escaped with ‘\\’. See +https://github.com/tidwall/gjson#path-syntax +for more information on how to use this. +

+ +
+ +name
string +
+ +

+ +Name acts as key that holds the value at the path. +

+ +
+ +

+ +PersistenceStrategy +

+ +

+ +(Appears on: +JetStreamBus, +NativeStrategy) +

+ +

+ +

+ +PersistenceStrategy defines the strategy of persistence +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +storageClassName
string +
+ +(Optional) +

+ +Name of the StorageClass required by the claim. More info: +https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 +

+ +
+ +accessMode
+ +Kubernetes core/v1.PersistentVolumeAccessMode +
+ +(Optional) +

+ +Available access modes such as ReadWriteOnce, ReadWriteMany +https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes +

+ +
+ +volumeSize
+k8s.io/apimachinery/pkg/api/resource.Quantity +
+ +

+ +Volume size, e.g. 10Gi +

+ +
+ +

+ +PubSubEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +PubSubEventSource refers to event-source for GCP PubSub related events. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +projectID
string +
+ +(Optional) +

+ +ProjectID is GCP project ID for the subscription. Required if you run +Argo Events outside of GKE/GCE. (otherwise, the default value is its +project) +

+ +
+ +topicProjectID
string +
+ +(Optional) +

+ +TopicProjectID is GCP project ID for the topic. By default, it is same +as ProjectID. +

+ +
+ +topic
string +
+ +(Optional) +

+ +Topic to which the subscription should belong. Required if you want the +eventsource to create a new subscription. If you specify this field +along with an existing subscription, it will be verified whether it +actually belongs to the specified topic. +

+ +
+ +subscriptionID
string +
+ +(Optional) +

+ +SubscriptionID is the ID of the subscription. Required if you use an +existing subscription. The default value is an auto-generated hash based +on this eventsource setting, so the subscription might be recreated every +time you update the setting, which has a possibility of event loss. +

+ +
+ +credentialSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +CredentialSecret references the secret that contains JSON credentials +to access GCP. If it is missing, Workload Identity is used implicitly for +access. +https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity +

+ +
+ +deleteSubscriptionOnFinish
bool +
+ +(Optional) +

+ +DeleteSubscriptionOnFinish determines whether to delete the GCP PubSub +subscription once the event source is stopped. +

+ +
+ +jsonBody
bool +
+ +(Optional) +

+ +JSONBody specifies that all event body payload coming from this source +will be JSON +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +
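+A minimal sketch of a pubSub event-source entry (spec key pubSub, as
+listed in EventSourceSpec); project, topic, subscription, and secret names
+are placeholders:
+
+spec:
+  pubSub:
+    example:
+      projectID: my-gcp-project                # hypothetical project
+      topic: example-topic                     # hypothetical topic
+      subscriptionID: example-subscription     # hypothetical subscription
+      jsonBody: true
+      credentialSecret:
+        name: gcp-credentials                  # hypothetical secret; omit to use Workload Identity
+        key: key.json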

+ +PulsarEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +PulsarEventSource describes the event source for Apache Pulsar +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +topics
\[\]string +
+ +

+ +Name of the topics to subscribe to. +

+ +
+ +type
string +
+ +(Optional) +

+ +Type of the subscription. Only “exclusive” and “shared” are supported. +Defaults to exclusive. +

+ +
+ +url
string +
+ +

+ +Configure the service URL for the Pulsar service. +

+ +
+ +tlsTrustCertsSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +Trusted TLS certificate secret. +

+ +
+ +tlsAllowInsecureConnection
bool +
+ +(Optional) +

+ +Whether the Pulsar client accepts untrusted TLS certificates from the +broker. +

+ +
+ +tlsValidateHostname
bool +
+ +(Optional) +

+ +Whether the Pulsar client verifies the validity of the host name from +the broker. +

+ +
+ +tls
+TLSConfig +
+ +(Optional) +

+ +TLS configuration for the pulsar client. +

+ +
+ +connectionBackoff
+ Backoff +
+ +(Optional) +

+ +Backoff holds parameters applied to connection. +

+ +
+ +jsonBody
bool +
+ +(Optional) +

+ +JSONBody specifies that all event body payload coming from this source +will be JSON +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +authTokenSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +Authentication token for the pulsar client. Either token or athenz can +be set to use auth. +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +authAthenzParams
map\[string\]string +
+ +(Optional) +

+ +Authentication athenz parameters for the pulsar client. Refer +https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go +Either token or athenz can be set to use auth. +

+ +
+ +authAthenzSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +Authentication athenz privateKey secret for the pulsar client. +AuthAthenzSecret must be set if AuthAthenzParams is used. +

+ +
+ +
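+A minimal sketch of a pulsar event-source entry (spec key pulsar, as
+listed in EventSourceSpec); the service URL, topic, and secret are
+placeholders:
+
+spec:
+  pulsar:
+    example:
+      url: pulsar://pulsar.argo-events.svc:6650  # hypothetical service URL
+      topics:
+        - my-topic                             # hypothetical topic
+      type: shared                             # or exclusive (the default)
+      jsonBody: true
+      authTokenSecret:
+        name: pulsar-auth                      # hypothetical secret
+        key: token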

+ +PulsarTrigger +

+ +

+ +(Appears on: +TriggerTemplate) +

+ +

+ +

+ +PulsarTrigger refers to the specification of the Pulsar trigger. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +url
string +
+ +

+ +Configure the service URL for the Pulsar service. +

+ +
+ +topic
string +
+ +

+ +Name of the topic. See +https://pulsar.apache.org/docs/en/concepts-messaging/ +

+ +
+ +parameters
+ \[\]TriggerParameter + +
+ +

+ +Parameters is the list of parameters that is applied to the resolved +Pulsar trigger object. +

+ +
+ +payload
+ \[\]TriggerParameter + +
+ +

+ +Payload is the list of key-value pairs extracted from an event payload to +construct the request payload. +

+ +
+ +tlsTrustCertsSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +Trusted TLS certificate secret. +

+ +
+ +tlsAllowInsecureConnection
bool +
+ +(Optional) +

+ +Whether the Pulsar client accepts untrusted TLS certificates from the +broker. +

+ +
+ +tlsValidateHostname
bool +
+ +(Optional) +

+ +Whether the Pulsar client verifies the validity of the host name from +the broker. +

+ +
+ +tls
+TLSConfig +
+ +(Optional) +

+ +TLS configuration for the pulsar client. +

+ +
+ +authTokenSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +Authentication token for the pulsar client. Either token or athenz can +be set to use auth. +

+ +
+ +connectionBackoff
+ Backoff +
+ +(Optional) +

+ +Backoff holds parameters applied to connection. +

+ +
+ +authAthenzParams
map\[string\]string +
+ +(Optional) +

+ +Authentication athenz parameters for the pulsar client. Refer +https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go +Either token or athenz can be set to use auth. +

+ +
+ +authAthenzSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +Authentication athenz privateKey secret for the pulsar client. +AuthAthenzSecret must be set if AuthAthenzParams is used. +

+ +
+ +

+ +RateLimit +

+ +

+ +(Appears on: +Trigger) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +unit
+ RateLimiteUnit + +
+ +

+ +Defaults to Second +

+ +
+ +requestsPerUnit
int32 +
+ +
+ +

+ +RateLimiteUnit (string alias) +

+ +

+ +

+ +(Appears on: +RateLimit) +

+ +

+ +

+ +

+ +RedisEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +RedisEventSource describes an event source for the Redis PubSub. More +info at +https://godoc.org/github.com/go-redis/redis#example-PubSub +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +hostAddress
string +
+ +

+ +HostAddress refers to the address of the Redis host/server +

+ +
+ +password
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +Password required for authentication if any. +

+ +
+ +namespace
string +
+ +(Optional) +

+ +Namespace to use to retrieve the password from. It should only be +specified if password is declared +

+ +
+ +db
int32 +
+ +(Optional) +

+ +DB to use. If not specified, default DB 0 will be used. +

+ +
+ +channels
\[\]string +
+ +
+ +tls
+TLSConfig +
+ +(Optional) +

+ +TLS configuration for the redis client. +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +jsonBody
bool +
+ +(Optional) +

+ +JSONBody specifies that all event body payload coming from this source +will be JSON +

+ +
+ +username
string +
+ +(Optional) +

+ +Username required for ACL style authentication if any. +

+ +
+ +

+ +RedisStreamEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +RedisStreamEventSource describes an event source for Redis streams +(https://redis.io/topics/streams-intro) +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +hostAddress
string +
+ +

+ +HostAddress refers to the address of the Redis host/server (master +instance) +

+ +
+ +password
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +Password required for authentication if any. +

+ +
+ +db
int32 +
+ +(Optional) +

+ +DB to use. If not specified, default DB 0 will be used. +

+ +
+ +streams
\[\]string +
+ +

+ +Streams to look for entries. XREADGROUP is used on all streams using a +single consumer group. +

+ +
+ +maxMsgCountPerRead
int32 +
+ +(Optional) +

+ +MaxMsgCountPerRead holds the maximum number of messages per stream that +will be read in each XREADGROUP of all streams. Example: if there are 2 +streams and MaxMsgCountPerRead=10, then each XREADGROUP may read up to a +total of 20 messages. Same as the COUNT option in +XREADGROUP(https://redis.io/topics/streams-intro). +Defaults to 10 +

+ +
+ +consumerGroup
string +
+ +(Optional) +

+ +ConsumerGroup refers to the Redis stream consumer group that will be +created on all redis streams. Messages are read through this group. +Defaults to ‘argo-events-cg’ +

+ +
+ +tls
+TLSConfig +
+ +(Optional) +

+ +TLS configuration for the redis client. +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +username
string +
+ +(Optional) +

+ +Username required for ACL style authentication if any. +

+ +
+ +

+ +ResourceEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +ResourceEventSource refers to an event-source for K8s resource-related +events. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +namespace
string +
+ +

+ +Namespace where resource is deployed +

+ +
+ +filter
+ ResourceFilter + +
+ +(Optional) +

+ +Filter is applied on the metadata of the resource. If you apply a filter, +the internal event informer will only monitor objects that pass the +filter. +

+ +
+ +GroupVersionResource
+ +Kubernetes meta/v1.GroupVersionResource +
+ +

+ +(Members of GroupVersionResource are embedded into this +type.) +

+ +

+ +Group of the resource +

+ +
+ +eventTypes
+ \[\]ResourceEventType + +
+ +

+ +EventTypes is the list of event types to watch. Possible values are - +ADD, UPDATE and DELETE. +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +
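+A minimal sketch of a resource event-source entry, assuming the
+EventSourceSpec key is resource and that Selector takes key, operation,
+and value (defined elsewhere in this reference); the label values are
+placeholders:
+
+spec:
+  resource:
+    example:
+      namespace: argo-events
+      group: apps                              # embedded GroupVersionResource
+      version: v1
+      resource: deployments
+      eventTypes:
+        - ADD
+        - UPDATE
+      filter:
+        afterStart: true
+        labels:
+          - key: app                           # assumed Selector fields
+            operation: "=="
+            value: my-app                      # hypothetical label value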

+ +ResourceEventType (string alias) +

+ +

+ +

+ +(Appears on: +ResourceEventSource) +

+ +

+ +

+ +ResourceEventType is the type of event for the K8s resource mutation +

+ +

+ +

+ +ResourceFilter +

+ +

+ +(Appears on: +ResourceEventSource) +

+ +

+ +

+ +ResourceFilter contains K8s ObjectMeta information to further filter +resource event objects +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +prefix
string +
+ +(Optional) +

+ +Prefix filter is applied on the resource name. +

+ +
+ +labels
+\[\]Selector +
+ +(Optional) +

+ +Labels provide listing options to K8s API to watch resource/s. Refer +https://kubernetes.io/docs/concepts/overview/working-with-objects/label-selectors/ +for more info. Unlike K8s field selector, multiple values are passed as +comma separated values instead of list of values. Eg: value: +value1,value2. Same as K8s label selector, operator “=”, “==”, “!=”, +“exists”, “!”, “notin”, “in”, “gt” and “lt” are supported +

+ +
+ +fields
+\[\]Selector +
+ +(Optional) +

+ +Fields provide field filters similar to the K8s field selector (see +https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/). +Unlike the K8s field selector, it supports arbitrary fields like +“spec.serviceAccountName”, and the value could be a string or a regex. +Same as the K8s field selector, the operators “=”, “==” and “!=” are +supported. +

+ +
+ +createdBy
+ +Kubernetes meta/v1.Time +
+ +(Optional) +

+ +If resource is created before the specified time then the event is +treated as valid. +

+ +
+ +afterStart
bool +
+ +(Optional) +

+ +If the resource is created after the start time then the event is +treated as valid. +

+ +
+ +

+ +S3Artifact +

+ +

+ +(Appears on: +ArtifactLocation, +EventSourceSpec) +

+ +

+ +

+ +S3Artifact contains information about an S3 connection and bucket +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +endpoint
string +
+ +
+ +bucket
+S3Bucket +
+ +
+ +region
string +
+ +
+ +insecure
bool +
+ +
+ +accessKey
+ +Kubernetes core/v1.SecretKeySelector +
+ +
+ +secretKey
+ +Kubernetes core/v1.SecretKeySelector +
+ +
+ +events
\[\]string +
+ +
+ +filter
+S3Filter +
+ +
+ +metadata
map\[string\]string +
+ +
+ +caCertificate
+ +Kubernetes core/v1.SecretKeySelector +
+ +
+ +

+ +S3Bucket +

+ +

+ +(Appears on: +S3Artifact) +

+ +

+ +

+ +S3Bucket contains information to describe an S3 Bucket +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +key
string +
+ +
+ +name
string +
+ +
+ +

+ +S3Filter +

+ +

+ +(Appears on: +S3Artifact) +

+ +

+ +

+ +S3Filter represents filters to apply to bucket notifications for +specifying constraints on objects +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +prefix
string +
+ +
+ +suffix
string +
+ +
+ +

+ +SASLConfig +

+ +

+ +(Appears on: +KafkaBus, +KafkaEventSource, +KafkaTrigger) +

+ +

+ +

+ +SASLConfig refers to SASL configuration for a client +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +mechanism
string +
+ +(Optional) +

+ +SASLMechanism is the name of the enabled SASL mechanism. Possible +values: OAUTHBEARER, PLAIN (defaults to PLAIN). +

+ +
+ +userSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +User is the authentication identity (authcid) to present for SASL/PLAIN +or SASL/SCRAM authentication +

+ +
+ +passwordSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +Password for SASL/PLAIN authentication +

+ +
+ +

+ +SFTPEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +SFTPEventSource describes an event-source for sftp related events. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +eventType
string +
+ +

+ +Type of file operations to watch. Refer to +https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go +for more information. +

+ +
+ +watchPathConfig
+ WatchPathConfig + +
+ +

+ +WatchPathConfig contains configuration about the file path to watch +

+ +
+ +username
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +Username required for authentication if any. +

+ +
+ +password
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +Password required for authentication if any. +

+ +
+ +sshKeySecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +SSHKeySecret refers to the secret that contains the SSH key. The key needs to +contain both the private key and the public key. +

+ +
+ +address
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +Address is the SFTP server address. +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +pollIntervalDuration
string +
+ +(Optional) +

+ +PollIntervalDuration is the interval at which to poll the SFTP server. +Defaults to 10 seconds. +

+ +
+ +
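+ 
+ +A sketch of an SFTP event-source entry. The sftp-config secret and the CREATE event +type (one of the fsnotify file operations) are assumptions for illustration. +
+ 
+ sftp:
+   example:
+     eventType: CREATE            # assumed fsnotify operation name
+     watchPathConfig:
+       directory: /uploads/
+       path: data.csv
+     address:
+       name: sftp-config          # hypothetical K8s secret
+       key: address
+     username:
+       name: sftp-config
+       key: username
+     password:
+       name: sftp-config
+       key: password
+     pollIntervalDuration: 10s    # matches the documented default
+ 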

+ +SNSEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +SNSEventSource refers to an event-source for AWS SNS related events +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +webhook
+ WebhookContext + +
+ +

+ +Webhook configuration for http server +

+ +
+ +topicArn
string +
+ +

+ +TopicArn refers to the ARN of the SNS topic +

+ +
+ +accessKey
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +AccessKey refers to the K8s secret containing the AWS access key +

+ +
+ +secretKey
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +SecretKey refers to the K8s secret containing the AWS secret key +

+ +
+ +region
string +
+ +

+ +Region is the AWS region +

+ +
+ +roleARN
string +
+ +(Optional) +

+ +RoleARN is the Amazon Resource Name (ARN) of the role to assume. +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user defined metadata which will passed along the +event payload. +

+ +
+ +validateSignature
bool +
+ +(Optional) +

+ +ValidateSignature is a boolean that can be set to true to enable SNS +signature verification +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +endpoint
string +
+ +(Optional) +

+ +Endpoint configures the connection to a specific SNS endpoint instead of +Amazon's servers +

+ +
+ +
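+ 
+ +A sketch of an SNS event-source entry. The topic ARN and the aws-secret secret are +placeholders for illustration. +
+ 
+ sns:
+   example:
+     webhook:
+       endpoint: /notification
+       port: "12000"
+     topicArn: arn:aws:sns:us-east-1:123456789012:my-topic   # hypothetical ARN
+     accessKey:
+       name: aws-secret           # hypothetical K8s secret
+       key: accesskey
+     secretKey:
+       name: aws-secret
+       key: secretkey
+     region: us-east-1
+     validateSignature: true      # enable SNS signature verification
+ 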

+ +SQSEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +SQSEventSource refers to an event-source for AWS SQS related events +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +accessKey
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +AccessKey refers to the K8s secret containing the AWS access key +

+ +
+ +secretKey
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +SecretKey refers to the K8s secret containing the AWS secret key +

+ +
+ +region
string +
+ +

+ +Region is the AWS region +

+ +
+ +queue
string +
+ +

+ +Queue is the AWS SQS queue to listen to for messages +

+ +
+ +waitTimeSeconds
int64 +
+ +

+ +WaitTimeSeconds is the duration (in seconds) for which the call waits +for a message to arrive in the queue before returning. +

+ +
+ +roleARN
string +
+ +(Optional) +

+ +RoleARN is the Amazon Resource Name (ARN) of the role to assume. +

+ +
+ +jsonBody
bool +
+ +(Optional) +

+ +JSONBody specifies that all event body payload coming from this source +will be JSON +

+ +
+ +queueAccountId
string +
+ +(Optional) +

+ +QueueAccountID is the ID of the account that created the queue to +monitor +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +dlq
bool +
+ +(Optional) +

+ +DLQ specifies if a dead-letter queue is configured for messages that +can’t be processed successfully. If set to true, messages with an invalid +payload won’t be acknowledged, allowing them to be forwarded to the +dead-letter queue. The default value is false. +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +endpoint
string +
+ +(Optional) +

+ +Endpoint configures the connection to a specific SQS endpoint instead of +Amazon's servers +

+ +
+ +sessionToken
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +SessionToken refers to the K8s secret containing the AWS temporary +credentials (STS) session token +

+ +
+ +
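+ 
+ +A sketch of an SQS event-source entry. The queue name and the aws-secret secret are +placeholders for illustration. +
+ 
+ sqs:
+   example:
+     region: us-east-1
+     queue: my-queue              # hypothetical queue name
+     waitTimeSeconds: 20          # long polling
+     jsonBody: true
+     accessKey:
+       name: aws-secret           # hypothetical K8s secret
+       key: accesskey
+     secretKey:
+       name: aws-secret
+       key: secretkey
+ 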

+ +SchemaRegistryConfig +

+ +

+ +(Appears on: +KafkaTrigger) +

+ +

+ +

+ +SchemaRegistryConfig refers to the Schema Registry configuration for a client +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +url
string +
+ +

+ +Schema Registry URL. +

+ +
+ +schemaId
int32 +
+ +

+ +Schema ID +

+ +
+ +auth
+BasicAuth +
+ +(Optional) +

+ +SchemaRegistry basic authentication +

+ +
+ +

+ +SecureHeader +

+ +

+ +

+ +SecureHeader refers to HTTP Headers with auth tokens as values +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +name
string +
+ +
+ +valueFrom
+ ValueFromSource + +
+ +

+ +Values can be read from either Secrets or ConfigMaps +

+ +
+ +

+ +Selector +

+ +

+ +(Appears on: +ResourceFilter) +

+ +

+ +

+ +Selector represents conditional operation to select K8s objects. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +key
string +
+ +

+ +Key name +

+ +
+ +operation
string +
+ +(Optional) +

+ +Supported operations are ==, !=, etc. Defaults to ==. Refer to +https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors +for more info. +

+ +
+ +value
string +
+ +

+ +Value +

+ +
+ +

+ +Sensor +

+ +

+ +

+ +Sensor is the definition of a sensor resource +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +metadata
+ +Kubernetes meta/v1.ObjectMeta +
+ +Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+ +spec
+SensorSpec +
+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +dependencies
+ \[\]EventDependency + +
+ +

+ +Dependencies is a list of the events that this sensor is dependent on. +

+ +
+ +triggers
+\[\]Trigger +
+ +

+ +Triggers is a list of the things that this sensor evokes. These are the +outputs from this sensor. +

+ +
+ +template
+ Template +
+ +(Optional) +

+ +Template is the pod specification for the sensor +

+ +
+ +errorOnFailedRound
bool +
+ +

+ +ErrorOnFailedRound if set to true, marks sensor state as +error if the previous trigger round fails. Once sensor +state is set to error, no further triggers will be +processed. +

+ +
+ +eventBusName
string +
+ +

+ +EventBusName references an EventBus name. By default the value is +“default”. +

+ +
+ +replicas
int32 +
+ +

+ +Replicas is the number of sensor deployment replicas +

+ +
+ +revisionHistoryLimit
int32 +
+ +(Optional) +

+ +RevisionHistoryLimit specifies how many old deployment revisions to +retain +

+ +
+ +loggingFields
map\[string\]string +
+ +(Optional) +

+ +LoggingFields add additional key-value pairs when logging happens +

+ +
+ +
+ +status
+ SensorStatus +
+ +(Optional) +
+ +

+ +SensorSpec +

+ +

+ +(Appears on: Sensor) +

+ +

+ +

+ +SensorSpec represents desired sensor state +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +dependencies
+ \[\]EventDependency + +
+ +

+ +Dependencies is a list of the events that this sensor is dependent on. +

+ +
+ +triggers
+\[\]Trigger +
+ +

+ +Triggers is a list of the things that this sensor evokes. These are the +outputs from this sensor. +

+ +
+ +template
+ Template +
+ +(Optional) +

+ +Template is the pod specification for the sensor +

+ +
+ +errorOnFailedRound
bool +
+ +

+ +ErrorOnFailedRound if set to true, marks sensor state as +error if the previous trigger round fails. Once sensor +state is set to error, no further triggers will be +processed. +

+ +
+ +eventBusName
string +
+ +

+ +EventBusName references an EventBus name. By default the value is +“default”. +

+ +
+ +replicas
int32 +
+ +

+ +Replicas is the number of sensor deployment replicas +

+ +
+ +revisionHistoryLimit
int32 +
+ +(Optional) +

+ +RevisionHistoryLimit specifies how many old deployment revisions to +retain +

+ +
+ +loggingFields
map\[string\]string +
+ +(Optional) +

+ +LoggingFields add additional key-value pairs when logging happens +

+ +
+ +
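+ 
+ +A minimal Sensor manifest exercising the SensorSpec fields above. The dependency names +refer to a hypothetical EventSource; the log trigger simply logs the event. +
+ 
+ apiVersion: argoproj.io/v1alpha1
+ kind: Sensor
+ metadata:
+   name: example-sensor
+ spec:
+   eventBusName: default          # the default value
+   dependencies:
+     - name: dep01
+       eventSourceName: example-event-source   # hypothetical EventSource name
+       eventName: example
+   triggers:
+     - template:
+         name: log-trigger
+         log: {}                  # log the incoming event
+ 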

+ +SensorStatus +

+ +

+ +(Appears on: Sensor) +

+ +

+ +

+ +SensorStatus contains information about the status of a sensor. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +Status
+Status +
+ +

+ +(Members of Status are embedded into this type.) +

+ +
+ +

+ +Service +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +Service holds the information of the service that the eventsource exposes +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +ports
+ +\[\]Kubernetes core/v1.ServicePort +
+ +

+ +The list of ports that are exposed by this ClusterIP service. +

+ +
+ +clusterIP
string +
+ +(Optional) +

+ +clusterIP is the IP address of the service and is usually assigned +randomly by the master. If an address is specified manually and is not +in use by others, it will be allocated to the service; otherwise, +creation of the service will fail. This field can not be changed through +updates. Valid values are “None”, empty string (“”), or a valid IP +address. “None” can be specified for headless services when proxying is +not required. More info: +https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies +

+ +
+ +

+ +SlackEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +SlackEventSource refers to an event-source for Slack related events +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +signingSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +Slack App signing secret +

+ +
+ +token
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +Token for URL verification handshake +

+ +
+ +webhook
+ WebhookContext + +
+ +

+ +Webhook holds configuration for a REST endpoint +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +

+ +SlackSender +

+ +

+ +(Appears on: +SlackTrigger) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +username
string +
+ +(Optional) +

+ +Username is the Slack application’s username +

+ +
+ +icon
string +
+ +(Optional) +

+ +Icon is the Slack application’s icon, e.g. :robot_face: or +https://example.com/image.png +

+ +
+ +

+ +SlackThread +

+ +

+ +(Appears on: +SlackTrigger) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +messageAggregationKey
string +
+ +(Optional) +

+ +MessageAggregationKey allows aggregating messages into a thread by +some key. +

+ +
+ +broadcastMessageToChannel
bool +
+ +(Optional) +

+ +BroadcastMessageToChannel allows the message from the thread to also be +broadcast to the channel +

+ +
+ +

+ +SlackTrigger +

+ +

+ +(Appears on: +TriggerTemplate) +

+ +

+ +

+ +SlackTrigger refers to the specification of the slack notification +trigger. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +parameters
+ \[\]TriggerParameter + +
+ +(Optional) +

+ +Parameters is the list of key-value extracted from event’s payload that +are applied to the trigger resource. +

+ +
+ +slackToken
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +SlackToken refers to the Kubernetes secret that holds the slack token +required to send messages. +

+ +
+ +channel
string +
+ +(Optional) +

+ +Channel refers to which Slack channel to send the Slack message to. +

+ +
+ +message
string +
+ +(Optional) +

+ +Message refers to the message to send to the Slack channel. +

+ +
+ +attachments
string +
+ +(Optional) +

+ +Attachments is a JSON format string that represents an array of Slack +attachments according to the attachments API: +https://api.slack.com/reference/messaging/attachments +. +

+ +
+ +blocks
string +
+ +(Optional) +

+ +Blocks is a JSON format string that represents an array of Slack blocks +according to the blocks API: +https://api.slack.com/reference/block-kit/blocks +. +

+ +
+ +thread
+ SlackThread +
+ +(Optional) +

+ +Thread refers to additional options for sending messages to a Slack +thread. +

+ +
+ +sender
+ SlackSender +
+ +(Optional) +

+ +Sender refers to additional configuration of the Slack application that +sends the message. +

+ +
+ +
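+ 
+ +A sketch of a SlackTrigger template, including the SlackSender and SlackThread +options. The slack-secret secret, channel and sender names are placeholders. +
+ 
+ slack:
+   slackToken:
+     name: slack-secret           # hypothetical K8s secret holding the token
+     key: token
+   channel: general
+   message: hello from the sensor
+   sender:
+     username: argo-events-bot    # hypothetical application username
+     icon: ":robot_face:"
+   thread:
+     messageAggregationKey: deploy-updates
+     broadcastMessageToChannel: true
+ 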

+ +StandardK8STrigger +

+ +

+ +(Appears on: +TriggerTemplate) +

+ +

+ +

+ +StandardK8STrigger is the standard Kubernetes resource trigger +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +source
+ ArtifactLocation + +
+ +

+ +Source of the K8s resource file(s) +

+ +
+ +operation
+ +KubernetesResourceOperation +
+ +(Optional) +

+ +Operation refers to the type of operation performed on the k8s resource. +Default value is Create. +

+ +
+ +parameters
+ \[\]TriggerParameter + +
+ +

+ +Parameters is the list of parameters that is applied to resolved K8s +trigger object. +

+ +
+ +patchStrategy
+k8s.io/apimachinery/pkg/types.PatchType +
+ +(Optional) +

+ +PatchStrategy controls the K8s object patching strategy when the trigger +operation is specified as patch. Possible values: +“application/json-patch+json”, “application/merge-patch+json”, +“application/strategic-merge-patch+json”, “application/apply-patch+yaml”. +Defaults to “application/merge-patch+json”. +

+ +
+ +liveObject
bool +
+ +(Optional) +

+ +LiveObject specifies whether the resource should be directly fetched +from K8s instead of being marshaled from the resource artifact. If set +to true, the resource artifact must contain the information required to +uniquely identify the resource in the cluster, that is, you must specify +“apiVersion” and “kind” as well as “name” and “namespace” metadata. Only +valid for operation type update. +

+ +
+ +
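+ 
+ +A sketch of a StandardK8STrigger that creates a Pod from an inline resource artifact; +the workload itself is a placeholder. +
+ 
+ k8s:
+   operation: create              # the default operation
+   source:
+     resource:                    # inline K8s resource as the artifact location
+       apiVersion: v1
+       kind: Pod
+       metadata:
+         generateName: example-
+       spec:
+         restartPolicy: Never
+         containers:
+           - name: main
+             image: busybox       # hypothetical image
+             command: ["echo", "hello"]
+ 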

+ +Status +

+ +

+ +(Appears on: +EventBusStatus, +EventSourceStatus, +SensorStatus) +

+ +

+ +

+ +Status is a common structure which can be used for Status field. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +conditions
+ \[\]Condition +
+ +(Optional) +

+ +Conditions are the latest available observations of a resource’s current +state. +

+ +
+ +

+ +StatusPolicy +

+ +

+ +(Appears on: +TriggerPolicy) +

+ +

+ +

+ +StatusPolicy refers to the policy used to check the state of the trigger +using response status +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +allow
\[\]int32 +
+ +
+ +

+ +StorageGridEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +StorageGridEventSource refers to an event-source for StorageGrid related +events +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +webhook
+ WebhookContext + +
+ +

+ +Webhook holds configuration for a REST endpoint +

+ +
+ +events
\[\]string +
+ +
+ +filter
+ StorageGridFilter + +
+ +

+ +Filter on object key which caused the notification. +

+ +
+ +topicArn
string +
+ +

+ +TopicArn +

+ +
+ +bucket
string +
+ +

+ +Name of the bucket to register notifications for. +

+ +
+ +region
string +
+ +(Optional) +

+ +S3 region. Defaults to us-east-1 +

+ +
+ +authToken
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +Auth token for the StorageGrid API +

+ +
+ +apiURL
string +
+ +

+ +APIURL is the URL of the StorageGrid API. +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +

+ +StorageGridFilter +

+ +

+ +(Appears on: +StorageGridEventSource) +

+ +

+ +

+ +StorageGridFilter represents filters to apply to bucket notifications +for specifying constraints on objects +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +prefix
string +
+ +
+ +suffix
string +
+ +
+ +

+ +StripeEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +StripeEventSource describes the event source for Stripe webhook +notifications. More info at +https://stripe.com/docs/webhooks +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +webhook
+ WebhookContext + +
+ +

+ +Webhook holds configuration for a REST endpoint +

+ +
+ +createWebhook
bool +
+ +(Optional) +

+ +CreateWebhook if specified creates a new webhook programmatically. +

+ +
+ +apiKey
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +APIKey refers to the K8s secret that holds the Stripe API key. Used only if +CreateWebhook is enabled. +

+ +
+ +eventFilter
\[\]string +
+ +(Optional) +

+ +EventFilter describes the type of events to listen to. If not specified, +all types of events will be processed. More info at +https://stripe.com/docs/api/events/list +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +

+ +TLSConfig +

+ +

+ +(Appears on: +AMQPEventSource, +AzureServiceBusEventSource, +AzureServiceBusTrigger, +BitbucketServerEventSource, +EmitterEventSource, +HTTPTrigger, +KafkaBus, +KafkaEventSource, +KafkaTrigger, +MQTTEventSource, +NATSEventsSource, +NATSTrigger, +NSQEventSource, +PulsarEventSource, +PulsarTrigger, +RedisEventSource, +RedisStreamEventSource) +

+ +

+ +

+ +TLSConfig refers to TLS configuration for a client. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +caCertSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +CACertSecret refers to the secret that contains the CA cert +

+ +
+ +clientCertSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +ClientCertSecret refers to the secret that contains the client cert +

+ +
+ +clientKeySecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +ClientKeySecret refers to the secret that contains the client key +

+ +
+ +insecureSkipVerify
bool +
+ +(Optional) +

+ +If true, skips creation of TLSConfig with certs and creates an empty +TLSConfig. (Defaults to false) +

+ +
+ +
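+ 
+ +A sketch of a TLSConfig block. The my-tls secret and its keys are assumptions; any +secret holding PEM-encoded data would work the same way. +
+ 
+ tls:
+   caCertSecret:
+     name: my-tls                 # hypothetical K8s secret with PEM data
+     key: ca.crt
+   clientCertSecret:
+     name: my-tls
+     key: tls.crt
+   clientKeySecret:
+     name: my-tls
+     key: tls.key
+ 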

+ +Template +

+ +

+ +(Appears on: +EventSourceSpec, +SensorSpec) +

+ +

+ +

+ +Template holds the information of a deployment template +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +metadata
+ Metadata +
+ +

+ +Metadata sets the pod's metadata, i.e. annotations and labels +

+ +
+ +serviceAccountName
string +
+ +(Optional) +

+ +ServiceAccountName is the name of the ServiceAccount to use to run the +sensor pod. More info: +https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +

+ +
+ +container
+ +Kubernetes core/v1.Container +
+ +(Optional) +

+ +Container is the main container image to run in the sensor pod +

+ +
+ +volumes
+ +\[\]Kubernetes core/v1.Volume +
+ +(Optional) +

+ +Volumes is a list of volumes that can be mounted by containers in a +workflow. +

+ +
+ +securityContext
+ +Kubernetes core/v1.PodSecurityContext +
+ +(Optional) +

+ +SecurityContext holds pod-level security attributes and common container +settings. Optional: Defaults to empty. See type description for default +values of each field. +

+ +
+ +nodeSelector
map\[string\]string +
+ +(Optional) +

+ +NodeSelector is a selector which must be true for the pod to fit on a +node. Selector which must match a node’s labels for the pod to be +scheduled on that node. More info: +https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +

+ +
+ +tolerations
+ +\[\]Kubernetes core/v1.Toleration +
+ +(Optional) +

+ +If specified, the pod’s tolerations. +

+ +
+ +imagePullSecrets
+ +\[\]Kubernetes core/v1.LocalObjectReference +
+ +(Optional) +

+ +ImagePullSecrets is an optional list of references to secrets in the +same namespace to use for pulling any of the images used by this +PodSpec. If specified, these secrets will be passed to individual puller +implementations for them to use. For example, in the case of docker, +only DockerConfig type secrets are honored. More info: +https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod +

+ +
+ +priorityClassName
string +
+ +(Optional) +

+ +If specified, indicates the EventSource pod’s priority. +“system-node-critical” and “system-cluster-critical” are two special +keywords which indicate the highest priorities with the former being the +highest priority. Any other name must be defined by creating a +PriorityClass object with that name. If not specified, the pod priority +will be default or zero if there is no default. More info: +https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +

+ +
+ +priority
int32 +
+ +(Optional) +

+ +The priority value. Various system components use this field to find the +priority of the EventSource pod. When Priority Admission Controller is +enabled, it prevents users from setting this field. The admission +controller populates this field from PriorityClassName. The higher the +value, the higher the priority. More info: +https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +

+ +
+ +affinity
+ +Kubernetes core/v1.Affinity +
+ +(Optional) +

+ +If specified, the pod’s scheduling constraints +

+ +
+ +
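+ 
+ +A sketch of a Template block controlling pod placement. The service account, node +selector, toleration and priority class values are assumptions. +
+ 
+ template:
+   serviceAccountName: argo-events-sa   # hypothetical ServiceAccount
+   nodeSelector:
+     kubernetes.io/os: linux
+   tolerations:
+     - key: dedicated
+       operator: Equal
+       value: events
+       effect: NoSchedule
+   priorityClassName: high-priority     # hypothetical PriorityClass
+ 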

+ +TimeFilter +

+ +

+ +(Appears on: +EventDependencyFilter) +

+ +

+ +

+ +TimeFilter describes a window in time. It filters out events that occur +outside the time limits. In other words, only events that occur after +Start and before Stop will pass this filter. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +start
string +
+ +

+ +Start is the beginning of a time window in UTC. Before this time, events +for this dependency are ignored. Format is hh:mm:ss. +

+ +
+ +stop
string +
+ +

+ +Stop is the end of a time window in UTC. After or equal to this time, +events for this dependency are ignored and Format is hh:mm:ss. If it is +smaller than Start, it is treated as next day of Start (e.g.: +22:00:00-01:00:00 means 22:00:00-25:00:00). +

+ +
+ +
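+ 
+ +A TimeFilter sketch for the overnight-window case described above, where stop is +smaller than start and is therefore treated as the next day. +
+ 
+ filters:
+   time:
+     start: "22:00:00"    # UTC
+     stop: "01:00:00"     # smaller than start, so read as 01:00:00 the next day
+ 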

+ +Trigger +

+ +

+ +(Appears on: +SensorSpec, +Trigger) +

+ +

+ +

+ +Trigger is an action taken, an output produced, an event created, or a +message sent +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +template
+ TriggerTemplate + +
+ +

+ +Template describes the trigger specification. +

+ +
+ +parameters
+ \[\]TriggerParameter + +
+ +

+ +Parameters is the list of parameters applied to the trigger template +definition +

+ +
+ +policy
+ TriggerPolicy +
+ +(Optional) +

+ +Policy to configure backoff and execution criteria for the trigger +

+ +
+ +retryStrategy
+ Backoff +
+ +(Optional) +

+ +Retry strategy, defaults to no retry +

+ +
+ +rateLimit
+ RateLimit +
+ +(Optional) +

+ +Rate limit, default unit is Second +

+ +
+ +atLeastOnce
bool +
+ +(Optional) +

+ +AtLeastOnce determines the trigger execution semantics. Defaults to +false. Trigger execution will use at-most-once semantics. If set to +true, Trigger execution will switch to at-least-once semantics. +

+ +
+ +dlqTrigger
+ Trigger +
+ +(Optional) +

+ +If the trigger fails, it will retry up to the configured number of +retries. If the maximum retries are reached and the trigger is set to +execute atLeastOnce, the dead letter queue (DLQ) trigger will be invoked +if specified. Invoking the dead letter queue trigger helps prevent data +loss. +

+ +
+ +
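+ 
+ +A sketch of the Trigger-level retry, rate-limit and at-least-once settings wrapped +around a template; the template content is elided, and the Backoff and RateLimit field +names are assumptions based on the referenced types. +
+ 
+ triggers:
+   - template:
+       name: example-trigger
+       # ... trigger template fields ...
+     retryStrategy:
+       steps: 3               # assumed Backoff field: retry up to 3 times
+     rateLimit:
+       unit: Second           # the default unit
+       requestsPerUnit: 20
+     atLeastOnce: true        # switch to at-least-once semantics
+ 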

+ +TriggerParameter +

+ +

+ +(Appears on: +AWSLambdaTrigger, +ArgoWorkflowTrigger, +AzureEventHubsTrigger, +AzureServiceBusTrigger, +CustomTrigger, +EmailTrigger, +HTTPTrigger, +KafkaTrigger, +NATSTrigger, +OpenWhiskTrigger, +PulsarTrigger, +SlackTrigger, +StandardK8STrigger, +Trigger) +

+ +

+ +

+ +TriggerParameter indicates a passed parameter to a service template +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +src
+ +TriggerParameterSource +
+ +

+ +Src contains a source reference to the value of the parameter from a +dependency +

+ +
+ +dest
string +
+ +

+ +Dest is the JSONPath of a resource key. A path is a series of keys +separated by a dot. The colon character can be escaped with ‘\.’ The -1 +key can be used to append a value to an existing array. See +https://github.com/tidwall/sjson#path-syntax +for more information about how this is used. +

+ +
+ +operation
+ +TriggerParameterOperation +
+ +

+ +Operation is what to do with the existing value at Dest, whether to +‘prepend’, ‘overwrite’, or ‘append’ it. +

+ +
+ +

+ +TriggerParameterOperation (string alias) +

+ +

+ +

+ +(Appears on: +TriggerParameter) +

+ +

+ +

+ +TriggerParameterOperation represents how to set a trigger destination +resource key +

+ +

+ +

+ +TriggerParameterSource +

+ +

+ +(Appears on: +TriggerParameter) +

+ +

+ +

+ +TriggerParameterSource defines the source for a parameter from an +event +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +dependencyName
string +
+ +

+ +DependencyName refers to the name of the dependency. The event which is +stored for this dependency is used as payload for the parameterization. +Make sure to refer to one of the dependencies you have defined under +Dependencies list. +

+ +
+ +contextKey
string +
+ +

+ +ContextKey is the JSONPath of the event’s (JSON decoded) context key. +ContextKey is a series of keys separated by a dot. A key may contain +wildcard characters ‘\*’ and ‘?’. To access an array value use the index +as the key. The dot and wildcard characters can be escaped with +‘\’. See +https://github.com/tidwall/gjson#path-syntax +for more information on how to use this. +

+ +
+ +contextTemplate
string +
+ +

+ +ContextTemplate is a go-template for extracting a string from the +event’s context. If a ContextTemplate is provided with a ContextKey, the +template will be evaluated first and fallback to the ContextKey. The +templating follows the standard go-template syntax as well as sprig’s +extra functions. See +https://pkg.go.dev/text/template +and +https://masterminds.github.io/sprig/ +

+ +
+ +dataKey
string +
+ +

+ +DataKey is the JSONPath of the event’s (JSON decoded) data key. DataKey +is a series of keys separated by a dot. A key may contain wildcard +characters ‘\*’ and ‘?’. To access an array value use the index as the +key. The dot and wildcard characters can be escaped with ‘\’. See +https://github.com/tidwall/gjson#path-syntax +for more information on how to use this. +

+ +
+ +dataTemplate
string +
+ +

+ +DataTemplate is a go-template for extracting a string from the event’s +data. If a DataTemplate is provided with a DataKey, the template will be +evaluated first and fallback to the DataKey. The templating follows the +standard go-template syntax as well as sprig’s extra functions. See +https://pkg.go.dev/text/template +and +https://masterminds.github.io/sprig/ +

+ +
+ +value
string +
+ +

+ +Value is the default literal value to use for this parameter source. This +is only used if the DataKey is invalid. If the DataKey is invalid and +this is not defined, this param source will produce an error. +

+ +
+ +useRawData
bool +
+ +(Optional) +

+ +UseRawData indicates if the value in an event at data key should be used +without converting to string. When true, a number, boolean, json or +string parameter may be extracted. When the field is unspecified, or +explicitly false, the behavior is to turn the extracted field into a +string. (e.g. when set to true, the parameter 123 will resolve to the +numerical type, but when false, or not provided, the string “123” will +be resolved) +

+ +
+ +
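+ 
+ +A TriggerParameter sketch extracting a value from the event payload. The dependency +name, data key and destination path are placeholders for illustration. +
+ 
+ parameters:
+   - src:
+       dependencyName: dep01
+       dataKey: body.name       # hypothetical key in the event data
+       value: default-name      # literal fallback if the dataKey is invalid
+     dest: spec.arguments.parameters.0.value
+     operation: overwrite
+ 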

+ +TriggerPolicy +

+ +

+ +(Appears on: +Trigger) +

+ +

+ +

+ +TriggerPolicy dictates the policy for the trigger retries +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +k8s
+ K8SResourcePolicy + +
+ +

+ +K8SResourcePolicy refers to the policy used to check the state of K8s +based triggers using labels +

+ +
+ +status
+ StatusPolicy +
+ +

+ +Status refers to the policy used to check the state of the trigger using +response status +

+ +
+ +

+ +TriggerTemplate +

+ +

+ +(Appears on: +Trigger) +

+ +

+ +

+ +TriggerTemplate is the template that describes trigger specification. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +name
string +
+ +

+ +Name is a unique name of the action to take. +

+ +
+ +conditions
string +
+ +(Optional) +

+ +Conditions is the conditions to execute the trigger. For example: +“(dep01 \|\| dep02) && dep04” +

+ +
+ +k8s
+ StandardK8STrigger + +
+ +(Optional) +

+ +StandardK8STrigger refers to the trigger designed to create or update a +generic Kubernetes resource. +

+ +
+ +argoWorkflow
+ ArgoWorkflowTrigger + +
+ +(Optional) +

+ +ArgoWorkflow refers to the trigger that can perform various operations +on an Argo workflow. +

+ +
+ +http
+HTTPTrigger +
+ +(Optional) +

+ +HTTP refers to the trigger designed to dispatch an HTTP request with an +on-the-fly constructable payload. +

+ +
+ +awsLambda
+ AWSLambdaTrigger + +
+ +(Optional) +

+ +AWSLambda refers to the trigger designed to invoke an AWS Lambda function +with an on-the-fly constructable payload. +

+ +
+ +custom
+ CustomTrigger +
+ +(Optional) +

+ +CustomTrigger refers to the trigger designed to connect to a gRPC +trigger server and execute a custom trigger. +

+ +
+ +kafka
+ KafkaTrigger +
+ +

+ +Kafka refers to the trigger designed to place messages on Kafka topic. +

+ +
+ +nats
+NATSTrigger +
+ +

+ +NATS refers to the trigger designed to place message on NATS subject. +

+ +
+ +slack
+ SlackTrigger +
+ +(Optional) +

+ +Slack refers to the trigger designed to send slack notification message. +

+ +
+ +openWhisk
+ OpenWhiskTrigger + +
+ +(Optional) +

+ +OpenWhisk refers to the trigger designed to invoke OpenWhisk action. +

+ +
+ +log
+LogTrigger +
+ +(Optional) +

+ +Log refers to the trigger designed to log the event. +

+ +
+ +azureEventHubs
+ +AzureEventHubsTrigger +
+ +(Optional) +

+ +AzureEventHubs refers to the trigger designed to send an event to an Azure +Event Hub. +

+ +
+ +pulsar
+ PulsarTrigger +
+ +(Optional) +

+ +Pulsar refers to the trigger designed to place messages on Pulsar topic. +

+ +
+ +conditionsReset
+ +\[\]ConditionsResetCriteria +
+ +(Optional) +

+ +Criteria to reset the conditions +

+ +
+ +azureServiceBus
+ +AzureServiceBusTrigger +
+ +(Optional) +

+ +AzureServiceBus refers to the trigger designed to place messages on +Azure Service Bus +

+ +
+ +email
+ EmailTrigger +
+ +(Optional) +

+ +Email refers to the trigger designed to send an email notification +

+ +
+ +

+ +TriggerType (string alias) +

+ +

+ +

+ +

+ +TriggerType is the type of trigger +

+ +

+ +

+ +Type (int64 alias) +

+ +

+ +

+ +(Appears on: +Int64OrString) +

+ +

+ +

+ +Type represents the stored type of Int64OrString. +

+ +

+ +

+ +URLArtifact +

+ +

+ +(Appears on: +ArtifactLocation) +

+ +

+ +

+ +URLArtifact contains information about an artifact at an http endpoint. +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +path
string +
+ +

+ +Path is the complete URL +

+ +
+ +verifyCert
bool +
+ +

+ +VerifyCert decides whether the connection is secure or not +

+ +
+ +

+ +ValueFromSource +

+ +

+ +(Appears on: +SecureHeader) +

+ +

+ +

+ +ValueFromSource allows you to reference keys from either a ConfigMap or +a Secret +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +secretKeyRef
+ +Kubernetes core/v1.SecretKeySelector +
+ +
+ +configMapKeyRef
+ +Kubernetes core/v1.ConfigMapKeySelector +
+ +
+ +

+ +WatchPathConfig +

+ +

+ +(Appears on: +FileEventSource, +HDFSEventSource, +SFTPEventSource) +

+ +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +directory
string +
+ +

+ +Directory to watch for events +

+ +
+ +path
string +
+ +

+ +Path is the relative path of the object to watch with respect to the +directory +

+ +
+ +pathRegexp
string +
+ +

+ +PathRegexp is the regexp of the relative path of the object to watch with +respect to the directory +

+ +
+ +

+ +WebhookContext +

+ +

+ +(Appears on: +BitbucketEventSource, +BitbucketServerEventSource, +GerritEventSource, +GithubEventSource, +GitlabEventSource, +SNSEventSource, +SlackEventSource, +StorageGridEventSource, +StripeEventSource, +WebhookEventSource) +

+ +

+ +

+ +WebhookContext holds a general purpose REST API context +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +endpoint
string +
+ +

+ +REST API endpoint +

+ +
+ +method
string +
+ +

+ +Method is HTTP request method that indicates the desired action to be +performed for a given resource. See RFC7231 Hypertext Transfer Protocol +(HTTP/1.1): Semantics and Content +

+ +
+ +port
string +
+ +

+ +Port on which HTTP server is listening for incoming events. +

+ +
+ +url
string +
+ +

+ +URL is the url of the server. +

+ +
+ +serverCertSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +ServerCertSecret refers to the secret that contains the cert. +

+ +
+ +serverKeySecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +

+ +ServerKeySecret refers to the secret that contains the private key +

+ +
+ +metadata
map\[string\]string +
+ +(Optional) +

+ +Metadata holds the user-defined metadata which will be passed along with +the event payload. +

+ +
+ +authSecret
+ +Kubernetes core/v1.SecretKeySelector +
+ +(Optional) +

+ +AuthSecret holds a secret selector that contains a bearer token for +authentication +

+ +
+ +maxPayloadSize
int64 +
+ +(Optional) +

+ +MaxPayloadSize is the maximum webhook payload size that the server will +accept. Requests exceeding that limit will be rejected with a “request too +large” response. Default value: 1048576 (1MB). +

+ +
+ +
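+ 
+ +A sketch of a WebhookContext as used by a webhook event-source. The endpoint, port and +the webhook-auth secret are placeholders for illustration. +
+ 
+ webhook:
+   example:
+     endpoint: /example
+     method: POST
+     port: "12000"
+     maxPayloadSize: 1048576    # the documented 1MB default
+     authSecret:
+       name: webhook-auth       # hypothetical K8s secret with a bearer token
+       key: token
+ 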

+ +WebhookEventSource +

+ +

+ +(Appears on: +EventSourceSpec) +

+ +

+ +

+ +WebhookEventSource describes an HTTP based EventSource +

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +Field + + +Description +
+ +WebhookContext
+ WebhookContext + +
+ +

+ +(Members of WebhookContext are embedded into this type.) +

+ +
+ +filter
+ EventSourceFilter + +
+ +(Optional) +

+ +Filter +

+ +
+ +
+ +

+ + Generated with gen-crd-api-reference-docs. +

+ + +
+ +
+
+ + + +
+ + + +
+
+
+
+ + + + + + + + \ No newline at end of file diff --git a/search/search_index.json b/search/search_index.json index 8bd8772f48..d2081bd7c2 100644 --- a/search/search_index.json +++ b/search/search_index.json @@ -1 +1 @@ -{"config":{"indexing":"full","lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"Argo Events - The Event-driven Workflow Automation Framework \u00b6 What is Argo Events? \u00b6 Argo Events is an event-driven workflow automation framework for Kubernetes which helps you trigger K8s objects, Argo Workflows, Serverless workloads, etc. on events from a variety of sources like webhooks, S3, schedules, messaging queues, gcp pubsub, sns, sqs, etc. Features \u00b6 Supports events from 20+ event sources. Ability to customize business-level constraint logic for workflow automation. Manage everything from simple, linear, real-time to complex, multi-source events. Supports Kubernetes Objects, Argo Workflow, AWS Lambda, Serverless, etc. as triggers. CloudEvents compliant. Getting Started \u00b6 Follow these instruction to set up Argo Events. Documentation \u00b6 Concepts . Argo Events in action . Deep dive into Argo Events . Triggers \u00b6 Argo Workflows Standard K8s Objects HTTP Requests / Serverless Workloads (OpenFaaS, Kubeless, KNative etc.) AWS Lambda NATS Messages Kafka Messages Slack Notifications Azure Event Hubs Messages Argo Rollouts Custom Trigger / Build Your Own Trigger Apache OpenWhisk Log Trigger Event Sources \u00b6 Argo Events supports 20+ event sources. The complete list of event sources is available here . Who uses Argo Events? \u00b6 Check the list to see who are officially using Argo Events. Please send a PR with your organization name if you are using Argo Events. Community Blogs and Presentations \u00b6 Automation of Everything - How To Combine Argo Events, Workflows & Pipelines, CD, and Rollouts Argo Events - Event-Based Dependency Manager for Kubernetes Argo Events Deep-dive Automating Research Workflows at BlackRock Designing A Complete CI/CD Pipeline CI/CD Pipeline Using Argo Events, Workflows, and CD TGI Kubernetes with Joe Beda: CloudEvents and Argo Events","title":"Home"},{"location":"#argo-events-the-event-driven-workflow-automation-framework","text":"","title":"Argo Events - The Event-driven Workflow Automation Framework"},{"location":"#what-is-argo-events","text":"Argo Events is an event-driven workflow automation framework for Kubernetes which helps you trigger K8s objects, Argo Workflows, Serverless workloads, etc. on events from a variety of sources like webhooks, S3, schedules, messaging queues, gcp pubsub, sns, sqs, etc.","title":"What is Argo Events?"},{"location":"#features","text":"Supports events from 20+ event sources. Ability to customize business-level constraint logic for workflow automation. Manage everything from simple, linear, real-time to complex, multi-source events. Supports Kubernetes Objects, Argo Workflow, AWS Lambda, Serverless, etc. as triggers. CloudEvents compliant.","title":"Features"},{"location":"#getting-started","text":"Follow these instruction to set up Argo Events.","title":"Getting Started"},{"location":"#documentation","text":"Concepts . Argo Events in action . Deep dive into Argo Events .","title":"Documentation"},{"location":"#triggers","text":"Argo Workflows Standard K8s Objects HTTP Requests / Serverless Workloads (OpenFaaS, Kubeless, KNative etc.) 
AWS Lambda NATS Messages Kafka Messages Slack Notifications Azure Event Hubs Messages Argo Rollouts Custom Trigger / Build Your Own Trigger Apache OpenWhisk Log Trigger","title":"Triggers"},{"location":"#event-sources","text":"Argo Events supports 20+ event sources. The complete list of event sources is available here .","title":"Event Sources"},{"location":"#who-uses-argo-events","text":"Check the list to see who are officially using Argo Events. Please send a PR with your organization name if you are using Argo Events.","title":"Who uses Argo Events?"},{"location":"#community-blogs-and-presentations","text":"Automation of Everything - How To Combine Argo Events, Workflows & Pipelines, CD, and Rollouts Argo Events - Event-Based Dependency Manager for Kubernetes Argo Events Deep-dive Automating Research Workflows at BlackRock Designing A Complete CI/CD Pipeline CI/CD Pipeline Using Argo Events, Workflows, and CD TGI Kubernetes with Joe Beda: CloudEvents and Argo Events","title":"Community Blogs and Presentations"},{"location":"CONTRIBUTING/","text":"Contributing \u00b6 How To Provide Feedback \u00b6 Please raise an issue in Github . Code of Conduct \u00b6 See CNCF Code of Conduct . Contributor Meetings \u00b6 A weekly opportunity for committers and maintainers of Workflows, Events, and Dataflow to discuss their current work and talk about what\u2019s next. Feel free to join us! For Contributor Meeting information, minutes and recordings please see here . How To Contribute \u00b6 We're always looking for contributors. Documentation - something missing or unclear? Please submit a pull request! Code contribution - investigate a good first issue , or anything not assigned. Join the #argo-contributors channel on our Slack . Running Locally \u00b6 To run Argo Events locally for development: developer guide . Dependencies \u00b6 Dependencies increase the risk of security issues and have on-going maintenance costs. The dependency must pass these test: A strong use case. It has an acceptable license (e.g. MIT). It is actively maintained. It has no security issues. Example, should we add fasttemplate , view the Snyk report : Test Outcome A strong use case. \u274c Fail. We can use text/template . It has an acceptable license (e.g. MIT) \u2705 Pass. MIT license. It is actively maintained. \u274c Fail. Project is inactive. It has no security issues. \u2705 Pass. No known security issues. No, we should not add that dependency. Contributor Workshop \u00b6 We have a 90m video on YouTube show you have to get hands-on contributing.","title":"Contributing"},{"location":"CONTRIBUTING/#contributing","text":"","title":"Contributing"},{"location":"CONTRIBUTING/#how-to-provide-feedback","text":"Please raise an issue in Github .","title":"How To Provide Feedback"},{"location":"CONTRIBUTING/#code-of-conduct","text":"See CNCF Code of Conduct .","title":"Code of Conduct"},{"location":"CONTRIBUTING/#contributor-meetings","text":"A weekly opportunity for committers and maintainers of Workflows, Events, and Dataflow to discuss their current work and talk about what\u2019s next. Feel free to join us! For Contributor Meeting information, minutes and recordings please see here .","title":"Contributor Meetings"},{"location":"CONTRIBUTING/#how-to-contribute","text":"We're always looking for contributors. Documentation - something missing or unclear? Please submit a pull request! Code contribution - investigate a good first issue , or anything not assigned. 
Join the #argo-contributors channel on our Slack .","title":"How To Contribute"},{"location":"CONTRIBUTING/#running-locally","text":"To run Argo Events locally for development: developer guide .","title":"Running Locally"},{"location":"CONTRIBUTING/#dependencies","text":"Dependencies increase the risk of security issues and have on-going maintenance costs. The dependency must pass these test: A strong use case. It has an acceptable license (e.g. MIT). It is actively maintained. It has no security issues. Example, should we add fasttemplate , view the Snyk report : Test Outcome A strong use case. \u274c Fail. We can use text/template . It has an acceptable license (e.g. MIT) \u2705 Pass. MIT license. It is actively maintained. \u274c Fail. Project is inactive. It has no security issues. \u2705 Pass. No known security issues. No, we should not add that dependency.","title":"Dependencies"},{"location":"CONTRIBUTING/#contributor-workshop","text":"We have a 90m video on YouTube show you have to get hands-on contributing.","title":"Contributor Workshop"},{"location":"FAQ/","text":"FAQs \u00b6 Q. How to get started with Argo Events? A . The recommended way to get started with Argo Events is: Read the basic concepts about EventBus , Sensor and Event Source . Install Argo Events as outlined here . Read the tutorials available here . Q. Can I deploy event-source and sensor in a namespace different than argo-events ? A . Yes. If you want to deploy the event-source in a different namespace than argo-events , please update the event-source definition with the desired namespace and service account. Make sure to grant the service account the necessary roles . Q. How to debug Argo-Events. A . Make sure you have installed everything as instructed here . Make sure you have the EventBus resource created within the namespace. The event-bus, event-source and sensor pods must be running. If you see any issue with the pods, check the logs for sensor-controller, event-source-controller and event-bus-controller. If event-source and sensor pods are running, but you are not receiving any events: Make sure you have configured the event source correctly. Check the event-source pod's containers logs. Note: You can set the environment variable LOG_LEVEL:info/debug/error in any of the containers to output debug logs. See here for a debug example. Q. The event-source pod is receiving events but nothing happens. A . Check the sensor resource is deployed and a pod is running for the resource. If the sensor pod is running, check for Started to subscribe events for triggers in the logs. If the sensor has subscribed to the event-bus but is unable to create the trigger resource, please raise an issue on GitHub. The sensor's dependencies have a specific eventSourceName and eventName that should match the values defined in the EventSource resource. See full details here . Q. Helm chart installation does not work. A. The Helm chart for argo events is maintained by the community and can be out of sync with latest release version. The official installation file is available here . If you notice the Helm chart is outdated, we encourage you to contribute to the argo-helm repository on GitHub. Q. Kustomization file doesn't have a X resource. A. The kustomization.yaml file is maintained by the community. If you notice that it is out of sync with the official installation file, please raise a PR. Q. Can I use the Minio event-source for AWS S3 notifications? A. No. The Minio event-source is exclusively for use only with Minio servers. 
If you want to trigger workloads on an AWS S3 bucket notification, set up the AWS SNS event-source. Q. If I have multiple event dependencies and triggers in a single sensor, can I execute a specific trigger upon a specific event? A. Yes, this functionality is offered by the sensor event resolution circuitry. Please take a look at the Circuit and Switch tutorial. Q. The latest image tag does not point to latest release tag? A. When it comes to image tags, the golden rule is do not trust the latest tag . Always use the pinned version of the images. We will try to keep the latest tag in sync with the most recently released version. Q. Where can I find the event structure for a particular event-source? A. Please refer to this file to understand the structure of different types of events dispatched by the event-source pod.","title":"FAQs"},{"location":"FAQ/#faqs","text":"Q. How to get started with Argo Events? A . The recommended way to get started with Argo Events is: Read the basic concepts about EventBus , Sensor and Event Source . Install Argo Events as outlined here . Read the tutorials available here . Q. Can I deploy event-source and sensor in a namespace different than argo-events ? A . Yes. If you want to deploy the event-source in a different namespace than argo-events , please update the event-source definition with the desired namespace and service account. Make sure to grant the service account the necessary roles . Q. How to debug Argo-Events. A . Make sure you have installed everything as instructed here . Make sure you have the EventBus resource created within the namespace. The event-bus, event-source and sensor pods must be running. If you see any issue with the pods, check the logs for sensor-controller, event-source-controller and event-bus-controller. If event-source and sensor pods are running, but you are not receiving any events: Make sure you have configured the event source correctly. Check the event-source pod's containers logs. Note: You can set the environment variable LOG_LEVEL:info/debug/error in any of the containers to output debug logs. See here for a debug example. Q. The event-source pod is receiving events but nothing happens. A . Check the sensor resource is deployed and a pod is running for the resource. If the sensor pod is running, check for Started to subscribe events for triggers in the logs. If the sensor has subscribed to the event-bus but is unable to create the trigger resource, please raise an issue on GitHub. The sensor's dependencies have a specific eventSourceName and eventName that should match the values defined in the EventSource resource. See full details here . Q. Helm chart installation does not work. A. The Helm chart for argo events is maintained by the community and can be out of sync with latest release version. The official installation file is available here . If you notice the Helm chart is outdated, we encourage you to contribute to the argo-helm repository on GitHub. Q. Kustomization file doesn't have a X resource. A. The kustomization.yaml file is maintained by the community. If you notice that it is out of sync with the official installation file, please raise a PR. Q. Can I use the Minio event-source for AWS S3 notifications? A. No. The Minio event-source is exclusively for use only with Minio servers. If you want to trigger workloads on an AWS S3 bucket notification, set up the AWS SNS event-source. Q. 
If I have multiple event dependencies and triggers in a single sensor, can I execute a specific trigger upon a specific event? A. Yes, this functionality is offered by the sensor event resolution circuitry. Please take a look at the Circuit and Switch tutorial. Q. The latest image tag does not point to latest release tag? A. When it comes to image tags, the golden rule is do not trust the latest tag . Always use the pinned version of the images. We will try to keep the latest tag in sync with the most recently released version. Q. Where can I find the event structure for a particular event-source? A. Please refer to this file to understand the structure of different types of events dispatched by the event-source pod.","title":"FAQs"},{"location":"developer_guide/","text":"Developer Guide \u00b6 Setup your DEV environment \u00b6 Argo Events is native to Kubernetes so you'll need a running Kubernetes cluster. This guide includes steps for Minikube for local development, but if you have another cluster you can ignore the Minikube specific step 3. Requirements \u00b6 Golang 1.20+ Docker Installation & Setup \u00b6 1. Get the project \u00b6 git clone git@github.com:argoproj/argo-events cd argo-events 2. Start Minikube and point Docker Client to Minikube's Docker Daemon \u00b6 minikube start eval $(minikube docker-env) 3. Build the project \u00b6 make build Changing Types \u00b6 If you're making a change to the pkg/apis package, please ensure you re-run following command for code regeneration. make codegen","title":"Developer Guide"},{"location":"developer_guide/#developer-guide","text":"","title":"Developer Guide"},{"location":"developer_guide/#setup-your-dev-environment","text":"Argo Events is native to Kubernetes so you'll need a running Kubernetes cluster. This guide includes steps for Minikube for local development, but if you have another cluster you can ignore the Minikube specific step 3.","title":"Setup your DEV environment"},{"location":"developer_guide/#requirements","text":"Golang 1.20+ Docker","title":"Requirements"},{"location":"developer_guide/#installation-setup","text":"","title":"Installation & Setup"},{"location":"developer_guide/#1-get-the-project","text":"git clone git@github.com:argoproj/argo-events cd argo-events","title":"1. Get the project"},{"location":"developer_guide/#2-start-minikube-and-point-docker-client-to-minikubes-docker-daemon","text":"minikube start eval $(minikube docker-env)","title":"2. Start Minikube and point Docker Client to Minikube's Docker Daemon"},{"location":"developer_guide/#3-build-the-project","text":"make build","title":"3. Build the project"},{"location":"developer_guide/#changing-types","text":"If you're making a change to the pkg/apis package, please ensure you re-run following command for code regeneration. make codegen","title":"Changing Types"},{"location":"dr_ha_recommendations/","text":"HA/DR Recommendations \u00b6 EventBus \u00b6 A simple EventBus used for non-prod deployment or testing purpose could be: apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : nats : native : auth : token However this is not good enough to run your production deployment, following settings are recommended to make it more reliable, and achieve high availability. Persistent Volumes \u00b6 Even though the EventBus PODs already have data sync mechanism between them, persistent volumes are still recommended to be used to avoid any events data lost when the PODs crash. 
An EventBus with persistent volumes looks like below: spec : nats : native : auth : token persistence : storageClassName : standard accessMode : ReadWriteOnce volumeSize : 20Gi Anti-Affinity \u00b6 You can run the EventBus PODs with anti-affinity, to avoid the situation that all PODs are gone when a disaster happens. An EventBus with best effort node anti-affinity: spec : nats : native : auth : token affinity : podAntiAffinity : preferredDuringSchedulingIgnoredDuringExecution : - podAffinityTerm : labelSelector : matchLabels : controller : eventbus-controller eventbus-name : default topologyKey : kubernetes.io/hostname weight : 100 An EventBus with hard requirement node anti-affinity: spec : nats : native : auth : token affinity : podAntiAffinity : requiredDuringSchedulingIgnoredDuringExecution : - labelSelector : matchLabels : controller : eventbus-controller eventbus-name : default topologyKey : kubernetes.io/hostname To do AZ (Availability Zone) anti-affinity, change the value of topologyKey from kubernetes.io/hostname to topology.kubernetes.io/zone . Besides affinity , nodeSelector and tolerations also could be set through spec.nats.native.nodeSelector and spec.nats.native.tolerations . POD Priority \u00b6 Setting POD Priority could reduce the chance of PODs being evicted. Priority could be set through spec.nats.native.priorityClassName or spec.nats.native.priority . PDB \u00b6 EventBus service is essential to EventSource and Sensor Pods, it would be better to have a PodDisruptionBudget to prevent it from Pod Disruptions . The following PDB object states maxUnavailable is 1, which is suitable for a 3 replica EventBus object. If your EventBus has a name other than default , change it accordingly in the yaml. apiVersion : policy/v1beta1 kind : PodDisruptionBudget metadata : name : eventbus-default-pdb spec : maxUnavailable : 1 selector : matchLabels : controller : eventbus-controller eventbus-name : default EventSources \u00b6 Replicas \u00b6 EventSources can run with HA by setting spec.replicas to a number >1 , see more detail here . EventSource POD Node Selection \u00b6 EventSource POD affinity , nodeSelector and tolerations could be set through spec.template.affinity , spec.template.nodeSelector and spec.template.tolerations . EventSource POD Priority \u00b6 Priority could be set through spec.template.priorityClassName or spec.template.priority . Sensors \u00b6 Replicas \u00b6 Sensors can run with HA by setting spec.replicas to a number >1 , see more detail here . Sensor POD Node Selection \u00b6 Sensor POD affinity , nodeSelector and tolerations could also be set through spec.template.affinity , spec.template.nodeSelector and spec.template.tolerations . 
Sensor POD Priority \u00b6 Priority could be set through spec.template.priorityClassName or spec.template.priority .","title":"HA/DR Recommendations"},{"location":"dr_ha_recommendations/#hadr-recommendations","text":"","title":"HA/DR Recommendations"},{"location":"dr_ha_recommendations/#eventbus","text":"A simple EventBus used for non-prod deployments or testing purposes could be: apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : nats : native : auth : token However, this is not good enough for a production deployment; the following settings are recommended to make it more reliable and achieve high availability.","title":"EventBus"},{"location":"dr_ha_recommendations/#persistent-volumes","text":"Even though the EventBus PODs already have a data sync mechanism between them, persistent volumes are still recommended to avoid any event data loss when the PODs crash. An EventBus with persistent volumes looks like the following: spec : nats : native : auth : token persistence : storageClassName : standard accessMode : ReadWriteOnce volumeSize : 20Gi","title":"Persistent Volumes"},{"location":"dr_ha_recommendations/#anti-affinity","text":"You can run the EventBus PODs with anti-affinity, to avoid the situation where all PODs are gone when a disaster happens. An EventBus with best effort node anti-affinity: spec : nats : native : auth : token affinity : podAntiAffinity : preferredDuringSchedulingIgnoredDuringExecution : - podAffinityTerm : labelSelector : matchLabels : controller : eventbus-controller eventbus-name : default topologyKey : kubernetes.io/hostname weight : 100 An EventBus with hard requirement node anti-affinity: spec : nats : native : auth : token affinity : podAntiAffinity : requiredDuringSchedulingIgnoredDuringExecution : - labelSelector : matchLabels : controller : eventbus-controller eventbus-name : default topologyKey : kubernetes.io/hostname To do AZ (Availability Zone) anti-affinity, change the value of topologyKey from kubernetes.io/hostname to topology.kubernetes.io/zone . Besides affinity , nodeSelector and tolerations could also be set through spec.nats.native.nodeSelector and spec.nats.native.tolerations .","title":"Anti-Affinity"},{"location":"dr_ha_recommendations/#pod-priority","text":"Setting POD Priority could reduce the chance of PODs being evicted. Priority could be set through spec.nats.native.priorityClassName or spec.nats.native.priority .","title":"POD Priority"},{"location":"dr_ha_recommendations/#pdb","text":"The EventBus service is essential to EventSource and Sensor Pods, so it is better to have a PodDisruptionBudget to protect it from Pod Disruptions . The following PDB object states maxUnavailable is 1, which is suitable for a 3-replica EventBus object. If your EventBus has a name other than default , change it accordingly in the yaml. 
apiVersion : policy/v1beta1 kind : PodDisruptionBudget metadata : name : eventbus-default-pdb spec : maxUnavailable : 1 selector : matchLabels : controller : eventbus-controller eventbus-name : default","title":"PDB"},{"location":"dr_ha_recommendations/#eventsources","text":"","title":"EventSources"},{"location":"dr_ha_recommendations/#replicas","text":"EventSources can run with HA by setting spec.replicas to a number >1 , see more detail here .","title":"Replicas"},{"location":"dr_ha_recommendations/#eventsource-pod-node-selection","text":"EventSource POD affinity , nodeSelector and tolerations could be set through spec.template.affinity , spec.template.nodeSelector and spec.template.tolerations .","title":"EventSource POD Node Selection"},{"location":"dr_ha_recommendations/#eventsource-pod-priority","text":"Priority could be set through spec.template.priorityClassName or spec.template.priority .","title":"EventSource POD Priority"},{"location":"dr_ha_recommendations/#sensors","text":"","title":"Sensors"},{"location":"dr_ha_recommendations/#replicas_1","text":"Sensors can run with HA by setting spec.replicas to a number >1 , see more detail here .","title":"Replicas"},{"location":"dr_ha_recommendations/#sensor-pod-node-selection","text":"Sensor POD affinity , nodeSelector and tolerations could also be set through spec.template.affinity , spec.template.nodeSelector and spec.template.tolerations .","title":"Sensor POD Node Selection"},{"location":"dr_ha_recommendations/#sensor-pod-priority","text":"Priority could be set through spec.template.priorityClassName or spec.template.priority .","title":"Sensor POD Priority"},{"location":"installation/","text":"Installation \u00b6 Requirements \u00b6 Kubernetes cluster >=v1.11 Installed the kubectl command-line tool >v1.11.0 Using kubectl \u00b6 Cluster-wide Installation \u00b6 Create the namespace. kubectl create namespace argo-events Deploy Argo Events SA, ClusterRoles, and Controller for Sensor, EventBus, and EventSource. kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install.yaml # Install with a validating admission controller kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install-validating-webhook.yaml NOTE: * On GKE , you may need to grant your account the ability to create new custom resource definitions and clusterroles kubectl create clusterrolebinding YOURNAME - cluster - admin - binding -- clusterrole = cluster - admin -- user = YOUREMAIL @gmail.com * On OpenShift : - Make sure to grant `anyuid` scc to the service accounts . oc adm policy add - scc - to - user anyuid system : serviceaccount : argo - events : argo - events - sa system : serviceaccount : argo - events : argo - events - webhook - sa - Add update permissions for the `deployments/finalizers` and `clusterroles/finalizers` of the argo - events - webhook ClusterRole ( this is necessary for the validating admission controller ) - apiGroups : - rbac . authorization . k8s . io resources : - clusterroles / finalizers verbs : - update - apiGroups : - apps resources : - deployments / finalizers verbs : - update Deploy the eventbus. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml Namespace Installation \u00b6 Create the namespace. kubectl create namespace argo-events Deploy Argo Events SA, ClusterRoles, and Controller for Sensor, EventBus, and EventSource. 
kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/namespace-install.yaml NOTE: * On GKE , you may need to grant your account the ability to create new custom resource definitions kubectl create clusterrolebinding YOURNAME - cluster - admin - binding -- clusterrole = cluster - admin -- user = YOUREMAIL @gmail.com * On OpenShift : - Make sure to grant `anyuid` scc to the service account . oc adm policy add - scc - to - user anyuid system : serviceaccount : argo - events : default - Add update permissions for the `deployments/finalizers` and `clusterroles/finalizers` of the argo - events - webhook ClusterRole ( this is necessary for the validating admission controller ) - apiGroups : - rbac . authorization . k8s . io resources : - clusterroles / finalizers verbs : - update - apiGroups : - apps resources : - deployments / finalizers verbs : - update Deploy the eventbus. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml Using Kustomize \u00b6 Use either cluster-install , or cluster-install-with-extension , or namespace-install folder as your base for Kustomize. kustomization.yaml : bases: - github.com/argoproj/argo-events/manifests/cluster-install # OR - github.com/argoproj/argo-events/manifests/namespace-install Using Helm Chart \u00b6 Make sure you have helm client installed. To install helm, follow the link. Add argoproj repository. helm repo add argo https://argoproj.github.io/argo-helm The helm chart for argo-events is maintained solely by the community and hence the image version for controllers can go out of sync. Update the image version in values.yaml to v1.0.0. Install argo-events chart. helm install argo-events argo/argo-events -n argo-events --create-namespace Deploy the eventbus. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml Migrate to v1.0.0 \u00b6 If you are looking to migrate Argo Events <0.16.0 to v1.0.0, please read the migration docs .","title":"Installation"},{"location":"installation/#installation","text":"","title":"Installation"},{"location":"installation/#requirements","text":"Kubernetes cluster >=v1.11 Installed the kubectl command-line tool >v1.11.0","title":"Requirements"},{"location":"installation/#using-kubectl","text":"","title":"Using kubectl"},{"location":"installation/#cluster-wide-installation","text":"Create the namespace. kubectl create namespace argo-events Deploy Argo Events SA, ClusterRoles, and Controller for Sensor, EventBus, and EventSource. kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install.yaml # Install with a validating admission controller kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install-validating-webhook.yaml NOTE: * On GKE , you may need to grant your account the ability to create new custom resource definitions and clusterroles kubectl create clusterrolebinding YOURNAME - cluster - admin - binding -- clusterrole = cluster - admin -- user = YOUREMAIL @gmail.com * On OpenShift : - Make sure to grant `anyuid` scc to the service accounts . 
oc adm policy add - scc - to - user anyuid system : serviceaccount : argo - events : default - Add update permissions for the `deployments/finalizers` and `clusterroles/finalizers` of the argo - events - webhook ClusterRole ( this is necessary for the validating admission controller ) - apiGroups : - rbac . authorization . k8s . io resources : - clusterroles / finalizers verbs : - update - apiGroups : - apps resources : - deployments / finalizers verbs : - update Deploy the eventbus. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml","title":"Namespace Installation"},{"location":"installation/#using-kustomize","text":"Use either the cluster-install , cluster-install-with-extension , or namespace-install folder as your base for Kustomize. kustomization.yaml : bases: - github.com/argoproj/argo-events/manifests/cluster-install # OR - github.com/argoproj/argo-events/manifests/namespace-install","title":"Using Kustomize"},{"location":"installation/#using-helm-chart","text":"Make sure you have the helm client installed. To install helm, follow the link. Add the argoproj repository. helm repo add argo https://argoproj.github.io/argo-helm The helm chart for argo-events is maintained solely by the community, and hence the image version for the controllers can go out of sync. Update the image version in values.yaml to v1.0.0. Install the argo-events chart. helm install argo-events argo/argo-events -n argo-events --create-namespace Deploy the eventbus. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml","title":"Using Helm Chart"},{"location":"installation/#migrate-to-v100","text":"If you are looking to migrate Argo Events <0.16.0 to v1.0.0, please read the migration docs .","title":"Migrate to v1.0.0"},{"location":"managed-namespace/","text":"Managed Namespace \u00b6 You can install argo-events in either a cluster-scoped or namespace-scoped configuration; accordingly, you need to set up a ClusterRole or a normal Role for the service account argo-events-sa . 
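As an illustration of the namespace-scoped case, here is a minimal sketch of a RoleBinding that grants a Role to that service account; the Role name argo-events-role is an assumption, so match it to whatever Role your install manifests define:

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: argo-events-role-binding
  namespace: argo-events
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: argo-events-role      # assumed name; match the Role from your install manifests
subjects:
  - kind: ServiceAccount
    name: argo-events-sa
    namespace: argo-events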
v1.7+ \u00b6 In a namespace-scoped installation, you must run the controller-manager deployment with --namespaced . If you would like to have the controller watch a separate namespace, add --managed-namespace as well. For example: - args: - --namespaced - --managed-namespace - default Prior to v1.7 \u00b6 There were 3 controller deployments ( eventbus-controller , eventsource-controller and sensor-controller ) in the versions prior to v1.7; to run a namespaced installation, add the --namespaced argument to each of them. The --managed-namespace argument is also supported to watch a different namespace.","title":"Managed Namespace"},{"location":"managed-namespace/#managed-namespace","text":"You can install argo-events in either a cluster-scoped or namespace-scoped configuration; accordingly, you need to set up a ClusterRole or a normal Role for the service account argo-events-sa .","title":"Managed Namespace"},{"location":"managed-namespace/#v17","text":"In a namespace-scoped installation, you must run the controller-manager deployment with --namespaced . If you would like to have the controller watch a separate namespace, add --managed-namespace as well. For example: - args: - --namespaced - --managed-namespace - default","title":"v1.7+"},{"location":"managed-namespace/#prior-to-v17","text":"There were 3 controller deployments ( eventbus-controller , eventsource-controller and sensor-controller ) in the versions prior to v1.7; to run a namespaced installation, add the --namespaced argument to each of them. The --managed-namespace argument is also supported to watch a different namespace.","title":"Prior to v1.7"},{"location":"metrics/","text":"Prometheus Metrics \u00b6 v1.3 and after User Metrics \u00b6 Each of the generated EventSource, Sensor and EventBus PODs exposes an HTTP endpoint for its metrics, which include things like how many events were generated, how many actions were triggered, and so on. To let your Prometheus server discover those user metrics, add the following to your configuration. - job_name: 'argo-events' kubernetes_sd_configs: - role: pod selectors: - role: pod label: 'controller in (eventsource-controller,sensor-controller,eventbus-controller)' relabel_configs: - source_labels: [__meta_kubernetes_pod_label_eventbus_name, __meta_kubernetes_pod_label_controller] action: replace regex: (.+);eventbus-controller replacement: $1 target_label: 'eventbus_name' - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_pod_label_controller] action: replace regex: (.+);eventbus-controller replacement: $1 target_label: 'namespace' - source_labels: [__address__, __meta_kubernetes_pod_label_controller] action: drop regex: (.+):(\\d222);eventbus-controller Also, please make sure your Prometheus Service Account has the permission to do POD discovery. A sample ClusterRole like the one below needs to be added or merged, then granted to your Service Account. apiVersion : rbac.authorization.k8s.io/v1 kind : ClusterRole metadata : name : pod-discovery rules : - apiGroups : [ \"\" ] resources : - pods verbs : [ \"get\" , \"list\" , \"watch\" ] EventSource \u00b6 argo_events_event_service_running_total \u00b6 How many configured events in the EventSource object are actively running. argo_events_events_sent_total \u00b6 How many events have been sent successfully. argo_events_events_sent_failed_total \u00b6 How many events failed to send to the EventBus. argo_events_events_processing_failed_total \u00b6 How many events failed to process for any reason; this includes argo_events_events_sent_failed_total . 
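As one example of consuming the failure counters above, here is a sketch of a PrometheusRule alert; it assumes the Prometheus Operator CRDs are installed, and the alert name, threshold and labels are illustrative:

apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: argo-events-alerts
  namespace: argo-events
spec:
  groups:
    - name: argo-events
      rules:
        - alert: ArgoEventsSendFailures
          # fires if any events failed to reach the EventBus over the last 5 minutes
          expr: sum(rate(argo_events_events_sent_failed_total[5m])) > 0
          for: 5m
          labels:
            severity: warning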
argo_events_event_processing_duration_milliseconds \u00b6 Event processing duration (from getting the event to sending it to the EventBus) in milliseconds. Sensor \u00b6 argo_events_action_triggered_total \u00b6 How many actions have been triggered successfully. argo_events_action_failed_total \u00b6 How many actions failed. argo_events_action_retries_failed_total \u00b6 How many actions failed after the retries have been exhausted. This is also incremented if there is no retryStrategy specified. argo_events_action_duration_milliseconds \u00b6 Action triggering duration. EventBus \u00b6 For the native NATS EventBus, check this link for the metrics explanation. Controller Metrics \u00b6 If you are interested in Argo Events controller metrics, add the following to your Prometheus configuration. - job_name: 'argo-events-controllers' kubernetes_sd_configs: - role: pod selectors: - role: pod label: 'app in (eventsource-controller,sensor-controller,eventbus-controller)' relabel_configs: - source_labels: [__address__, __meta_kubernetes_pod_label_app] action: replace regex: (.+);(eventsource-controller|sensor-controller|eventbus-controller) replacement: $1:7777 target_label: '__address__' - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_pod_label_app] action: replace regex: (.+);(eventsource-controller|sensor-controller|eventbus-controller) replacement: $1 target_label: 'namespace' Golden Signals \u00b6 The following metrics are considered the Golden Signals for monitoring your applications running with Argo Events. Latency argo_events_event_processing_duration_milliseconds argo_events_action_duration_milliseconds Traffic argo_events_events_sent_total argo_events_action_triggered_total Errors argo_events_events_processing_failed_total argo_events_events_sent_failed_total argo_events_action_failed_total argo_events_action_retries_failed_total Saturation argo_events_event_service_running_total . Other Kubernetes metrics such as CPU or memory.","title":"Prometheus Metrics"},{"location":"metrics/#prometheus-metrics","text":"v1.3 and after","title":"Prometheus Metrics"},{"location":"metrics/#user-metrics","text":"Each of the generated EventSource, Sensor and EventBus PODs exposes an HTTP endpoint for its metrics, which include things like how many events were generated, how many actions were triggered, and so on. To let your Prometheus server discover those user metrics, add the following to your configuration. - job_name: 'argo-events' kubernetes_sd_configs: - role: pod selectors: - role: pod label: 'controller in (eventsource-controller,sensor-controller,eventbus-controller)' relabel_configs: - source_labels: [__meta_kubernetes_pod_label_eventbus_name, __meta_kubernetes_pod_label_controller] action: replace regex: (.+);eventbus-controller replacement: $1 target_label: 'eventbus_name' - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_pod_label_controller] action: replace regex: (.+);eventbus-controller replacement: $1 target_label: 'namespace' - source_labels: [__address__, __meta_kubernetes_pod_label_controller] action: drop regex: (.+):(\\d222);eventbus-controller Also, please make sure your Prometheus Service Account has the permission to do POD discovery. A sample ClusterRole like the one below needs to be added or merged, then granted to your Service Account. 
apiVersion : rbac.authorization.k8s.io/v1 kind : ClusterRole metadata : name : pod-discovery rules : - apiGroups : [ \"\" ] resources : - pods verbs : [ \"get\" , \"list\" , \"watch\" ]","title":"User Metrics"},{"location":"metrics/#eventsource","text":"","title":"EventSource"},{"location":"metrics/#argo_events_event_service_running_total","text":"How many configured events in the EventSource object are actively running.","title":"argo_events_event_service_running_total"},{"location":"metrics/#argo_events_events_sent_total","text":"How many events have been sent successfully.","title":"argo_events_events_sent_total"},{"location":"metrics/#argo_events_events_sent_failed_total","text":"How many events failed to send to the EventBus.","title":"argo_events_events_sent_failed_total"},{"location":"metrics/#argo_events_events_processing_failed_total","text":"How many events failed to process for any reason; this includes argo_events_events_sent_failed_total .","title":"argo_events_events_processing_failed_total"},{"location":"metrics/#argo_events_event_processing_duration_milliseconds","text":"Event processing duration (from getting the event to sending it to the EventBus) in milliseconds.","title":"argo_events_event_processing_duration_milliseconds"},{"location":"metrics/#sensor","text":"","title":"Sensor"},{"location":"metrics/#argo_events_action_triggered_total","text":"How many actions have been triggered successfully.","title":"argo_events_action_triggered_total"},{"location":"metrics/#argo_events_action_failed_total","text":"How many actions failed.","title":"argo_events_action_failed_total"},{"location":"metrics/#argo_events_action_retries_failed_total","text":"How many actions failed after the retries have been exhausted. This is also incremented if there is no retryStrategy specified.","title":"argo_events_action_retries_failed_total"},{"location":"metrics/#argo_events_action_duration_milliseconds","text":"Action triggering duration.","title":"argo_events_action_duration_milliseconds"},{"location":"metrics/#eventbus","text":"For the native NATS EventBus, check this link for the metrics explanation.","title":"EventBus"},{"location":"metrics/#controller-metrics","text":"If you are interested in Argo Events controller metrics, add the following to your Prometheus configuration. - job_name: 'argo-events-controllers' kubernetes_sd_configs: - role: pod selectors: - role: pod label: 'app in (eventsource-controller,sensor-controller,eventbus-controller)' relabel_configs: - source_labels: [__address__, __meta_kubernetes_pod_label_app] action: replace regex: (.+);(eventsource-controller|sensor-controller|eventbus-controller) replacement: $1:7777 target_label: '__address__' - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_pod_label_app] action: replace regex: (.+);(eventsource-controller|sensor-controller|eventbus-controller) replacement: $1 target_label: 'namespace'","title":"Controller Metrics"},{"location":"metrics/#golden-signals","text":"The following metrics are considered the Golden Signals for monitoring your applications running with Argo Events. Latency argo_events_event_processing_duration_milliseconds argo_events_action_duration_milliseconds Traffic argo_events_events_sent_total argo_events_action_triggered_total Errors argo_events_events_processing_failed_total argo_events_events_sent_failed_total argo_events_action_failed_total argo_events_action_retries_failed_total Saturation argo_events_event_service_running_total . 
Other Kubernetes metrics such as CPU or memory.","title":"Golden Signals"},{"location":"quick_start/","text":"Getting Started \u00b6 We are going to set up a sensor and event-source for webhook. The goal is to trigger an Argo workflow upon an HTTP Post request. Note: You will need to have Argo Workflows installed to make this work. The Argo Workflow controller will need to be configured to listen for Workflow objects created in argo-events namespace. (See this link.) The Workflow Controller will need to be installed either in a cluster-scope configuration (i.e. no \"--namespaced\" argument) so that it has visiblity to all namespaces, or with \"--managed-namespace\" set to define \"argo-events\" as a namespace it has visibility to. To deploy Argo Workflows with a cluster-scope configuration you can use this installation yaml file, setting ARGO_WORKFLOWS_VERSION with your desired version. A list of versions can be found by viewing these project tags in the Argo Workflow GitHub repository. export ARGO_WORKFLOWS_VERSION = 3.5 . 4 kubectl create namespace argo kubectl apply - n argo - f https : // github . com / argoproj / argo - workflows / releases / download / v $ ARGO_WORKFLOWS_VERSION / install . yaml Install Argo Events kubectl create namespace argo-events kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install.yaml # Install with a validating admission controller kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install-validating-webhook.yaml Make sure to have the eventbus pods running in the namespace. Run following command to create the eventbus. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml Setup event-source for webhook as follows. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml The above event-source contains a single event configuration that runs an HTTP server on port 12000 with endpoint example . After running the above command, the event-source controller will create a pod and service. Create a service account with RBAC settings to allow the sensor to trigger workflows, and allow workflows to function. # sensor rbac kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/rbac/sensor-rbac.yaml # workflow rbac kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/rbac/workflow-rbac.yaml Create webhook sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/webhook.yaml Once the sensor object is created, sensor controller will create corresponding pod and a service. Expose the event-source pod via Ingress, OpenShift Route or port forward to consume requests over HTTP. kubectl -n argo-events port-forward $(kubectl -n argo-events get pod -l eventsource-name=webhook -o name) 12000:12000 & Use either Curl or Postman to send a post request to the http://localhost:12000/example . curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Verify that an Argo workflow was triggered. kubectl -n argo-events get workflows | grep \"webhook\"","title":"Getting Started"},{"location":"quick_start/#getting-started","text":"We are going to set up a sensor and event-source for webhook. 
The goal is to trigger an Argo workflow upon an HTTP POST request. Note: You will need to have Argo Workflows installed to make this work. The Argo Workflow controller will need to be configured to listen for Workflow objects created in the argo-events namespace. (See this link.) The Workflow Controller will need to be installed either in a cluster-scope configuration (i.e. no \"--namespaced\" argument) so that it has visibility to all namespaces, or with \"--managed-namespace\" set to define \"argo-events\" as a namespace it has visibility to. To deploy Argo Workflows with a cluster-scope configuration, you can use this installation yaml file, setting ARGO_WORKFLOWS_VERSION with your desired version. A list of versions can be found by viewing these project tags in the Argo Workflow GitHub repository. export ARGO_WORKFLOWS_VERSION=3.5.4 kubectl create namespace argo kubectl apply -n argo -f https://github.com/argoproj/argo-workflows/releases/download/v$ARGO_WORKFLOWS_VERSION/install.yaml Install Argo Events kubectl create namespace argo-events kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install.yaml # Install with a validating admission controller kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install-validating-webhook.yaml Make sure to have the eventbus pods running in the namespace. Run the following command to create the eventbus. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml Set up the event-source for webhook as follows. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml The above event-source contains a single event configuration that runs an HTTP server on port 12000 with endpoint example . After running the above command, the event-source controller will create a pod and service. Create a service account with RBAC settings to allow the sensor to trigger workflows, and allow workflows to function. # sensor rbac kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/rbac/sensor-rbac.yaml # workflow rbac kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/rbac/workflow-rbac.yaml Create the webhook sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/webhook.yaml Once the sensor object is created, the sensor controller will create a corresponding pod and service. Expose the event-source pod via Ingress, OpenShift Route or port forward to consume requests over HTTP. kubectl -n argo-events port-forward $(kubectl -n argo-events get pod -l eventsource-name=webhook -o name) 12000:12000 & Use either Curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Verify that an Argo workflow was triggered. kubectl -n argo-events get workflows | grep \"webhook\"","title":"Getting Started"},{"location":"quick_start/#getting-started","text":"We are going to set up a sensor and event-source for webhook. The goal is to trigger an Argo workflow upon an HTTP POST request. Note: You will need to have Argo Workflows installed to make this work. The Argo Workflow controller will need to be configured to listen for Workflow objects created in the argo-events namespace. (See this link.) The Workflow Controller will need to be installed either in a cluster-scope configuration (i.e. no \"--namespaced\" argument) so that it has visibility to all namespaces, or with \"--managed-namespace\" set to define \"argo-events\" as a namespace it has visibility to. To deploy Argo Workflows with a cluster-scope configuration, you can use this installation yaml file, setting ARGO_WORKFLOWS_VERSION with your desired version. A list of versions can be found by viewing these project tags in the Argo Workflow GitHub repository. export ARGO_WORKFLOWS_VERSION=3.5.4 kubectl create namespace argo kubectl apply -n argo -f https://github.com/argoproj/argo-workflows/releases/download/v$ARGO_WORKFLOWS_VERSION/install.yaml Install Argo Events kubectl create namespace argo-events kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install.yaml # Install with a validating admission controller kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install-validating-webhook.yaml Make sure to have the eventbus pods running in the namespace. Run the following command to create the eventbus. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml Set up the event-source for webhook as follows. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml The above event-source contains a single event configuration that runs an HTTP server on port 12000 with endpoint example . After running the above command, the event-source controller will create a pod and service. Create a service account with RBAC settings to allow the sensor to trigger workflows, and allow workflows to function. # sensor rbac kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/rbac/sensor-rbac.yaml # workflow rbac kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/rbac/workflow-rbac.yaml Create the webhook sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/webhook.yaml Once the sensor object is created, the sensor controller will create a corresponding pod and service. Expose the event-source pod via Ingress, OpenShift Route or port forward to consume requests over HTTP. kubectl -n argo-events port-forward $(kubectl -n argo-events get pod -l eventsource-name=webhook -o name) 12000:12000 & Use either Curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Verify that an Argo workflow was triggered. kubectl -n argo-events get workflows | grep \"webhook\"","title":"Getting Started"},{"location":"releases/","text":"Releases \u00b6 Latest releases Supported Versions \u00b6 Versions are expressed as x.y.z, where x is the major version, y is the minor version, and z is the patch version, following Semantic Versioning terminology. We maintain release branches for the most recent two minor releases. 
Fixes may be backported to release branches, depending on severity, risk, and feasibility. If a release contains breaking changes or CVE fixes, this will be documented in the release notes. Supported Version Skew \u00b6 Image versions of eventsource , sensor , eventbus-controller , eventsource-controller , sensor-controller and events-webhook should be the same. Release Cycle \u00b6 For unstable , we build and tag latest images for every commit to master. New minor versions are released roughly every 2 months. Release candidates for each release are typically available for 2 weeks before the release becomes generally available. Otherwise, we typically patch the release as needed.","title":"Releases"},{"location":"releases/#releases","text":"Latest releases","title":"Releases"},{"location":"releases/#supported-versions","text":"Versions are expressed as x.y.z, where x is the major version, y is the minor version, and z is the patch version, following Semantic Versioning terminology. We maintain release branches for the most recent two minor releases. Fixes may be backported to release branches, depending on severity, risk, and feasibility. If a release contains breaking changes or CVE fixes, this will be documented in the release notes.","title":"Supported Versions"},{"location":"releases/#supported-version-skew","text":"Image versions of eventsource , sensor , eventbus-controller , eventsource-controller , sensor-controller and events-webhook should be the same.","title":"Supported Version Skew"},{"location":"releases/#release-cycle","text":"For unstable , we build and tag latest images for every commit to master. New minor versions are released roughly every 2 months. Release candidates for each release are typically available for 2 weeks before the release becomes generally available. Otherwise, we typically patch the release as needed.","title":"Release Cycle"},{"location":"security/","text":"Security \u00b6 Please see SECURITY.md","title":"Security"},{"location":"security/#security","text":"Please see SECURITY.md","title":"Security"},{"location":"service-accounts/","text":"Service Accounts \u00b6 Service Account for EventSources \u00b6 A Service Account can be specified in the EventSource object with spec.template.serviceAccountName , however it is not needed for any of the EventSource types except resource . For a resource EventSource, you need to specify a Service Account and give it list and watch permissions for the resource being watched. For example, if you want to watch actions on Deployment objects, you need to: Create a Service Account. kubectl -n your-namespace create sa my-sa Grant RBAC privileges to it. kubectl -n your-namespace create role deployments-watcher --verb=list,watch --resource=deployments.apps kubectl -n your-namespace create rolebinding deployments-watcher-role-binding --role=deployments-watcher --serviceaccount=your-namespace:my-sa or (if you want to watch at cluster scope) kubectl create clusterrole deployments-watcher --verb=list,watch --resource=deployments.apps kubectl create clusterrolebinding deployments-watcher-clusterrole-binding --clusterrole=deployments-watcher --serviceaccount=your-namespace:my-sa Service Account for Sensors \u00b6 A Service Account can also be specified in a Sensor object via spec.template.serviceAccountName ; this is only needed when a k8s trigger or argoWorkflow trigger is defined in the Sensor object. 
The sensor examples we provide use the operate-workflow-sa service account to execute the triggers, but it has more permissions than needed, and you may want to limit those privileges based on your use-case. It is always a good practice to create a service account with the minimum privileges needed to execute the triggers. Argo Workflow Trigger \u00b6 To submit a workflow through argoWorkflow trigger, make sure to grant the Service Account create and list access to workflows.argoproj.io . To resubmit , retry , resume or suspend a workflow through argoWorkflow trigger, the service account needs update and get access to workflows.argoproj.io . K8s Resource Trigger \u00b6 To trigger a K8s resource including workflows.argoproj.io through k8s trigger, make sure to grant create permission to that resource. AWS Lambda, HTTP, Slack, NATS, Kafka, and OpenWhisk Triggers \u00b6 For these triggers, you don't need to specify a Service Account to the Sensor. Service Account for Triggered Workflows (or other K8s resources) \u00b6 When the Sensor is used to trigger a Workflow, you might need to configure the Service Account used in the Workflow spec ( NOT spec.template.serviceAccountName ) following the Argo Workflow instructions . If it is used to trigger other K8s resources (e.g. a Deployment), make sure to follow the principle of least privilege.","title":"Service Accounts"},{"location":"service-accounts/#service-accounts","text":"","title":"Service Accounts"},{"location":"service-accounts/#service-account-for-eventsources","text":"A Service Account can be specified in the EventSource object with spec.template.serviceAccountName , however it is not needed for any of the EventSource types except resource . For a resource EventSource, you need to specify a Service Account and give it list and watch permissions for the resource being watched. For example, if you want to watch actions on Deployment objects, you need to: Create a Service Account. kubectl -n your-namespace create sa my-sa Grant RBAC privileges to it. kubectl -n your-namespace create role deployments-watcher --verb=list,watch --resource=deployments.apps kubectl -n your-namespace create rolebinding deployments-watcher-role-binding --role=deployments-watcher --serviceaccount=your-namespace:my-sa or (if you want to watch at cluster scope) kubectl create clusterrole deployments-watcher --verb=list,watch --resource=deployments.apps kubectl create clusterrolebinding deployments-watcher-clusterrole-binding --clusterrole=deployments-watcher --serviceaccount=your-namespace:my-sa","title":"Service Account for EventSources"},{"location":"service-accounts/#service-account-for-sensors","text":"A Service Account can also be specified in a Sensor object via spec.template.serviceAccountName ; this is only needed when a k8s trigger or argoWorkflow trigger is defined in the Sensor object. The sensor examples we provide use the operate-workflow-sa service account to execute the triggers, but it has more permissions than needed, and you may want to limit those privileges based on your use-case. It is always a good practice to create a service account with the minimum privileges needed to execute the triggers.","title":"Service Account for Sensors"},{"location":"service-accounts/#argo-workflow-trigger","text":"To submit a workflow through argoWorkflow trigger, make sure to grant the Service Account create and list access to workflows.argoproj.io . 
To resubmit , retry , resume or suspend a workflow through argoWorkflow trigger, the service account needs update and get access to workflows.argoproj.io .","title":"Argo Workflow Trigger"},{"location":"service-accounts/#k8s-resource-trigger","text":"To trigger a K8s resource including workflows.argoproj.io through k8s trigger, make sure to grant create permission to that resource.","title":"K8s Resource Trigger"},{"location":"service-accounts/#aws-lambda-http-slack-nats-kafka-and-openwhisk-triggers","text":"For these triggers, you don't need to specify a Service Account to the Sensor.","title":"AWS Lambda, HTTP, Slack, NATS, Kafka, and OpenWhisk Triggers"},{"location":"service-accounts/#service-account-for-triggered-workflows-or-other-k8s-resources","text":"When the Sensor is used to trigger a Workflow, you might need to configure the Service Account used in the Workflow spec ( NOT spec.template.serviceAccountName ) following the Argo Workflow instructions . If it is used to trigger other K8s resources (e.g. a Deployment), make sure to follow the principle of least privilege.","title":"Service Account for Triggered Workflows (or other K8s resources)"},{"location":"validating-admission-webhook/","text":"Validating Admission Webhook \u00b6 v1.3 and after Overview \u00b6 Starting from v1.3, a Validating Admission Webhook is introduced to the project. To install the validating webhook, use the following command (change the version): kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/ { version } /manifests/install-validating-webhook.yaml Benefits \u00b6 Using the validating webhook has the following benefits: It reports the error at the time the faulty spec is applied, so that you don't need to check the CRD object status field for condition errors later on. e.g. Creating an exotic NATS EventBus without ClusterID specified: cat <<EOF | kubectl create -f - > apiVersion: argoproj.io/v1alpha1 > kind: EventBus > metadata: > name: default > spec: > nats: > exotic: {} > EOF Error from server ( BadRequest ) : error when creating \"STDIN\" : admission webhook \"webhook.argo-events.argoproj.io\" denied the request: \"spec.nats.exotic.clusterID\" is missing Spec updating behavior can be validated. Updating existing specs requires more validation: besides checking if the new spec is valid, we also need to check if there are any immutable fields being updated. This cannot be done in the controller reconciliation, but we can do it by using the validating webhook. For example, updating Auth Strategy for a native NATS EventBus is prohibited; a denied response like the following will be returned. 
Error from server ( BadRequest ) : error when applying patch: { \"metadata\" : { \"annotations\" : { \"kubectl.kubernetes.io/last-applied-configuration\" : \"{\\\"apiVersion\\\":\\\"argoproj.io/v1alpha1\\\",\\\"kind\\\":\\\"EventBus\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"default\\\",\\\"namespace\\\":\\\"argo-events\\\"},\\\"spec\\\":{\\\"nats\\\":{\\\"native\\\":{\\\"replicas\\\":3}}}}\\n\" }} , \"spec\" : { \"nats\" : { \"native\" : { \"auth\" :null, \"maxAge\" :null, \"securityContext\" :null }}}} to: Resource: \"argoproj.io/v1alpha1, Resource=eventbus\" , GroupVersionKind: \"argoproj.io/v1alpha1, Kind=EventBus\" Name: \"default\" , Namespace: \"argo-events\" for : \"test-eventbus.yaml\" : admission webhook \"webhook.argo-events.argoproj.io\" denied the request: \"spec.nats.native.auth\" is immutable, can not be updated","title":"Validating Admission Webhook"},{"location":"validating-admission-webhook/#validating-admission-webhook","text":"v1.3 and after","title":"Validating Admission Webhook"},{"location":"validating-admission-webhook/#overview","text":"Starting from v1.3, a Validating Admission Webhook is introduced to the project. To install the validating webhook, use the following command (change the version): kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/ { version } /manifests/install-validating-webhook.yaml","title":"Overview"},{"location":"validating-admission-webhook/#benefits","text":"Using the validating webhook has the following benefits: It reports the error at the time the faulty spec is applied, so that you don't need to check the CRD object status field for condition errors later on. e.g. Creating an exotic NATS EventBus without ClusterID specified: cat <<EOF | kubectl create -f - > apiVersion: argoproj.io/v1alpha1 > kind: EventBus > metadata: > name: default > spec: > nats: > exotic: {} > EOF Error from server ( BadRequest ) : error when creating \"STDIN\" : admission webhook \"webhook.argo-events.argoproj.io\" denied the request: \"spec.nats.exotic.clusterID\" is missing Spec updating behavior can be validated. Updating existing specs requires more validation: besides checking if the new spec is valid, we also need to check if there are any immutable fields being updated. This cannot be done in the controller reconciliation, but we can do it by using the validating webhook. For example, updating Auth Strategy for a native NATS EventBus is prohibited; a denied response like the following will be returned. 
Error from server ( BadRequest ) : error when applying patch: { \"metadata\" : { \"annotations\" : { \"kubectl.kubernetes.io/last-applied-configuration\" : \"{\\\"apiVersion\\\":\\\"argoproj.io/v1alpha1\\\",\\\"kind\\\":\\\"EventBus\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"default\\\",\\\"namespace\\\":\\\"argo-events\\\"},\\\"spec\\\":{\\\"nats\\\":{\\\"native\\\":{\\\"replicas\\\":3}}}}\\n\" }} , \"spec\" : { \"nats\" : { \"native\" : { \"auth\" :null, \"maxAge\" :null, \"securityContext\" :null }}}} to: Resource: \"argoproj.io/v1alpha1, Resource=eventbus\" , GroupVersionKind: \"argoproj.io/v1alpha1, Kind=EventBus\" Name: \"default\" , Namespace: \"argo-events\" for : \"test-eventbus.yaml\" : admission webhook \"webhook.argo-events.argoproj.io\" denied the request: \"spec.nats.native.auth\" is immutable, can not be updated","title":"Benefits"},{"location":"concepts/architecture/","text":"Architecture \u00b6 Main components of Argo Events are: Event Source Sensor Eventbus Trigger","title":"Architecture"},{"location":"concepts/architecture/#architecture","text":"Main components of Argo Events are: Event Source Sensor Eventbus Trigger","title":"Architecture"},{"location":"concepts/event_source/","text":"Event Source \u00b6 An EventSource defines the configurations required to consume events from external sources like AWS SNS, SQS, GCP PubSub, Webhooks, etc. It further transforms the events into the cloudevents and dispatches them over to the eventbus. Available event-sources: AMQP AWS SNS AWS SQS Azure Events Hub Azure Queue Storage Bitbucket Bitbucket Server Calendar Emitter File Based Events GCP PubSub Generic EventSource GitHub GitLab HDFS K8s Resources Kafka Minio NATS NetApp StorageGrid MQTT NSQ Pulsar Redis Slack Stripe Webhooks Specification \u00b6 The complete specification is available here . Examples \u00b6 Examples are located under examples/event-sources .","title":"Event Source"},{"location":"concepts/event_source/#event-source","text":"An EventSource defines the configurations required to consume events from external sources like AWS SNS, SQS, GCP PubSub, Webhooks, etc. It further transforms the events into the cloudevents and dispatches them over to the eventbus. Available event-sources: AMQP AWS SNS AWS SQS Azure Events Hub Azure Queue Storage Bitbucket Bitbucket Server Calendar Emitter File Based Events GCP PubSub Generic EventSource GitHub GitLab HDFS K8s Resources Kafka Minio NATS NetApp StorageGrid MQTT NSQ Pulsar Redis Slack Stripe Webhooks","title":"Event Source"},{"location":"concepts/event_source/#specification","text":"The complete specification is available here .","title":"Specification"},{"location":"concepts/event_source/#examples","text":"Examples are located under examples/event-sources .","title":"Examples"},{"location":"concepts/eventbus/","text":"EventBus \u00b6 The EventBus acts as the transport layer of Argo-Events by connecting the EventSources and Sensors. EventSources publish the events while the Sensors subscribe to the events to execute triggers. There are three implementations of the EventBus: NATS (deprecated), Jetstream , and Kafka .","title":"EventBus"},{"location":"concepts/eventbus/#eventbus","text":"The EventBus acts as the transport layer of Argo-Events by connecting the EventSources and Sensors. EventSources publish the events while the Sensors subscribe to the events to execute triggers. 
There are three implementations of the EventBus: NATS (deprecated), Jetstream , and Kafka .","title":"EventBus"},{"location":"concepts/sensor/","text":"Sensor \u00b6 Sensor defines a set of event dependencies (inputs) and triggers (outputs). It listens to events on the eventbus and acts as an event dependency manager to resolve and execute the triggers. Event dependency \u00b6 A dependency is an event the sensor is waiting to happen. Specification \u00b6 Complete specification is available here . Examples \u00b6 Examples are located under examples/sensors .","title":"Sensor"},{"location":"concepts/sensor/#sensor","text":"Sensor defines a set of event dependencies (inputs) and triggers (outputs). It listens to events on the eventbus and acts as an event dependency manager to resolve and execute the triggers.","title":"Sensor"},{"location":"concepts/sensor/#event-dependency","text":"A dependency is an event the sensor is waiting to happen.","title":"Event dependency"},{"location":"concepts/sensor/#specification","text":"Complete specification is available here .","title":"Specification"},{"location":"concepts/sensor/#examples","text":"Examples are located under examples/sensors .","title":"Examples"},{"location":"concepts/trigger/","text":"Trigger \u00b6 A Trigger is the resource/workload executed by the sensor once the event dependencies are resolved. Trigger Types \u00b6 AWS Lambda Apache OpenWhisk Argo Rollouts Argo Workflows Custom - Build Your Own HTTP Requests - Serverless Workloads (OpenFaaS, Kubeless, KNative etc.) Kafka Messages NATS Messages Slack Notifications Azure Event Hubs Messages Create any Kubernetes Objects Log (for debugging event bus messages)","title":"Trigger"},{"location":"concepts/trigger/#trigger","text":"A Trigger is the resource/workload executed by the sensor once the event dependencies are resolved.","title":"Trigger"},{"location":"concepts/trigger/#trigger-types","text":"AWS Lambda Apache OpenWhisk Argo Rollouts Argo Workflows Custom - Build Your Own HTTP Requests - Serverless Workloads (OpenFaaS, Kubeless, KNative etc.) Kafka Messages NATS Messages Slack Notifications Azure Event Hubs Messages Create any Kubernetes Objects Log (for debugging event bus messages)","title":"Trigger Types"},{"location":"eventbus/antiaffinity/","text":"Anti-affinity \u00b6 Kubernetes offers a concept of anti-affinity , meaning that pods are scheduled on separate nodes. The anti-affinity can either be \"best effort\" or a hard requirement. A best effort and a hard requirement node anti-affinity config look like below, if you want to do AZ (Availability Zone) anti-affinity, change the value of topologyKey from kubernetes.io/hostname to topology.kubernetes.io/zone . # Best effort affinity : podAntiAffinity : preferredDuringSchedulingIgnoredDuringExecution : - podAffinityTerm : labelSelector : matchLabels : controller : eventbus-controller eventbus-name : default topologyKey : kubernetes.io/hostname weight : 100 # Hard requirement affinity : podAntiAffinity : requiredDuringSchedulingIgnoredDuringExecution : - labelSelector : matchLabels : controller : eventbus-controller eventbus-name : default topologyKey : kubernetes.io/hostname","title":"Antiaffinity"},{"location":"eventbus/antiaffinity/#anti-affinity","text":"Kubernetes offers a concept of anti-affinity , meaning that pods are scheduled on separate nodes. The anti-affinity can either be \"best effort\" or a hard requirement. 
A best effort and a hard requirement node anti-affinity config look like below, if you want to do AZ (Availability Zone) anti-affinity, change the value of topologyKey from kubernetes.io/hostname to topology.kubernetes.io/zone . # Best effort affinity : podAntiAffinity : preferredDuringSchedulingIgnoredDuringExecution : - podAffinityTerm : labelSelector : matchLabels : controller : eventbus-controller eventbus-name : default topologyKey : kubernetes.io/hostname weight : 100 # Hard requirement affinity : podAntiAffinity : requiredDuringSchedulingIgnoredDuringExecution : - labelSelector : matchLabels : controller : eventbus-controller eventbus-name : default topologyKey : kubernetes.io/hostname","title":"Anti-affinity"},{"location":"eventbus/eventbus/","text":"EventBus \u00b6 v0.17.0 and after EventBus is a Kubernetes Custom Resource which is used for event transmission from EventSources to Sensors. Currently, EventBus is backed by NATS , including both their NATS Streaming service, their newer Jetstream service, and Kafka. In the future, this can be expanded to support other technologies as well. EventBus is namespaced; an EventBus object is required in a namespace to make EventSource and Sensor work. The common practice is to create an EventBus named default in the namespace. If you want to use a different name, or you want to have multiple EventBus in one namespace, you need to specify eventBusName in the spec of EventSource and Sensor correspondingly, so that they can find the right one. See EventSource spec and Sensor spec .","title":"EventBus"},{"location":"eventbus/eventbus/#eventbus","text":"v0.17.0 and after EventBus is a Kubernetes Custom Resource which is used for event transmission from EventSources to Sensors. Currently, EventBus is backed by NATS , including both their NATS Streaming service, their newer Jetstream service, and Kafka. In the future, this can be expanded to support other technologies as well. EventBus is namespaced; an EventBus object is required in a namespace to make EventSource and Sensor work. The common practice is to create an EventBus named default in the namespace. If you want to use a different name, or you want to have multiple EventBus in one namespace, you need to specify eventBusName in the spec of EventSource and Sensor correspondingly, so that they can find the right one. See EventSource spec and Sensor spec .","title":"EventBus"},{"location":"eventbus/jetstream/","text":"Jetstream \u00b6 Jetstream is the latest streaming server implemented by the NATS community, with improvements from the original NATS Streaming (which will eventually be deprecated). A simplest Jetstream EventBus example: apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : jetstream : version : latest # Do NOT use \"latest\" but a specific version in your real deployment # See: https://argoproj.github.io/argo-events/eventbus/jetstream/#version The example above brings up a Jetstream StatefulSet with 3 replicas in the namespace. Properties \u00b6 Check here for the full spec of jetstream . version \u00b6 The version number specified in the example above is the release number for the NATS server. We will support some subset of these as we've tried them out and only plan to upgrade them as needed. The list of available versions is managed by the controller manager ConfigMap, which can be updated to support new versions. kubectl get configmap argo-events-controller-config -o yaml Check here for a list of configurable features per version. 
A more involved example \u00b6 Another example with more configuration: apiVersion: argoproj.io/v1alpha1 kind: EventBus metadata: name: default spec: jetstream: version: latest # Do NOT use \"latest\" but a specific version in your real deployment replicas: 5 persistence: # optional storageClassName: standard accessMode: ReadWriteOnce volumeSize: 10Gi streamConfig: | # see default values in argo-events-controller-config maxAge: 24h settings: | max_file_store: 1GB # see default values in argo-events-controller-config startArgs: - \"-D\" # debug-level logs Security \u00b6 For Jetstream, TLS is turned on for all client-server communication as well as between Jetstream nodes. In addition, for client-server communication we by default use password authentication (and because TLS is turned on, the password is encrypted). How it works under the hood \u00b6 Jetstream has the concept of a Stream, and Subjects (i.e. topics) which are used on a Stream. From the documentation: \u201cEach Stream defines how messages are stored and what the limits (duration, size, interest) of the retention are.\u201d For Argo Events, we have one Stream called \"default\" with a single set of settings, but we have multiple subjects, each of which is named default.. . Sensors subscribe to the subjects they need using durable consumers. Exotic \u00b6 To use an existing JetStream service, follow the example below. apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : jetstreamExotic : url : nats://xxxxx:xxx accessSecret : name : my-secret-name key : secret-key streamConfig : \"\"","title":"Jetstream"},{"location":"eventbus/jetstream/#jetstream","text":"Jetstream is the latest streaming server implemented by the NATS community, with improvements from the original NATS Streaming (which will eventually be deprecated). A simplest Jetstream EventBus example: apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : jetstream : version : latest # Do NOT use \"latest\" but a specific version in your real deployment # See: https://argoproj.github.io/argo-events/eventbus/jetstream/#version The example above brings up a Jetstream StatefulSet with 3 replicas in the namespace.","title":"Jetstream"},{"location":"eventbus/jetstream/#properties","text":"Check here for the full spec of jetstream .","title":"Properties"},{"location":"eventbus/jetstream/#version","text":"The version number specified in the example above is the release number for the NATS server. We will support some subset of these as we've tried them out and only plan to upgrade them as needed. The list of available versions is managed by the controller manager ConfigMap, which can be updated to support new versions. 
kubectl get configmap argo-events-controller-config -o yaml Check here for a list of configurable features per version.","title":"version"},{"location":"eventbus/jetstream/#a-more-involved-example","text":"Another example with more configuration: apiVersion: argoproj.io/v1alpha1 kind: EventBus metadata: name: default spec: jetstream: version: latest # Do NOT use \"latest\" but a specific version in your real deployment replicas: 5 persistence: # optional storageClassName: standard accessMode: ReadWriteOnce volumeSize: 10Gi streamConfig: | # see default values in argo-events-controller-config maxAge: 24h settings: | max_file_store: 1GB # see default values in argo-events-controller-config startArgs: - \"-D\" # debug-level logs","title":"A more involved example"},{"location":"eventbus/jetstream/#security","text":"For Jetstream, TLS is turned on for all client-server communication as well as between Jetstream nodes. In addition, for client-server communication we by default use password authentication (and because TLS is turned on, the password is encrypted).","title":"Security"},{"location":"eventbus/jetstream/#how-it-works-under-the-hood","text":"Jetstream has the concept of a Stream, and Subjects (i.e. topics) which are used on a Stream. From the documentation: \u201cEach Stream defines how messages are stored and what the limits (duration, size, interest) of the retention are.\u201d For Argo Events, we have one Stream called \"default\" with a single set of settings, but we have multiple subjects, each of which is named default.. . Sensors subscribe to the subjects they need using durable consumers.","title":"How it works under the hood"},{"location":"eventbus/jetstream/#exotic","text":"To use an existing JetStream service, follow the example below. apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : jetstreamExotic : url : nats://xxxxx:xxx accessSecret : name : my-secret-name key : secret-key streamConfig : \"\"","title":"Exotic"},{"location":"eventbus/kafka/","text":"Kafka is a widely used event streaming platform. We recommend using Kafka if you have a lot of events and want to horizontally scale your Sensors. If you are looking to get started quickly with Argo Events, we recommend using Jetstream instead. When using a Kafka EventBus, you must already have a Kafka cluster set up and topics created (unless you have auto-create enabled; see topics below). Example \u00b6 apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : kafka : url : kafka:9092 # must be managed independently topic : \"example\" # optional See here for the full specification. Properties \u00b6 url \u00b6 Comma-separated list of Kafka broker URLs; the Kafka brokers must be managed independently of Argo Events. topic \u00b6 The topic name, defaults to {namespace-name}-{eventbus-name} . Two additional topics per Sensor are also required, see topics below for more information. version \u00b6 Kafka version; we recommend not setting this field manually in most circumstances. Defaults to the oldest supported stable version. tls \u00b6 Enables TLS on the Kafka connection. tls: caCertSecret: name: my-secret key: ca-cert-key clientCertSecret: name: my-secret key: client-cert-key clientKeySecret: name: my-secret key: client-key-key sasl \u00b6 Enables SASL authentication on the Kafka connection. sasl: mechanism: PLAIN passwordSecret: key: password name: my-user userSecret: key: user name: my-user consumerGroup.groupName \u00b6 Consumer group name, defaults to {namespace-name}-{sensor-name} . 
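Putting the consumer group options together (this field and the two that follow), here is a sketch of a Kafka EventBus with an explicit consumer group; the groupName value is illustrative:

apiVersion: argoproj.io/v1alpha1
kind: EventBus
metadata:
  name: default
spec:
  kafka:
    url: kafka:9092
    consumerGroup:
      groupName: my-group        # overrides the {namespace-name}-{sensor-name} default
      rebalanceStrategy: sticky  # see the following section
      startOldest: true          # consume from the oldest event when the group starts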
consumerGroup.rebalanceStrategy \u00b6 The kafka rebalance strategy; can be one of: sticky, roundrobin, range. Defaults to range. consumerGroup.startOldest \u00b6 When starting up a new group, whether to start from the oldest event (true) or the newest event (false). Defaults to false. Security \u00b6 You can enable TLS or SASL authentication, see above for configuration details. You must enable these features in your Kafka Cluster and make the certificates/credentials available in a Kubernetes secret. Topics \u00b6 The Kafka EventBus requires one event topic and two additional topics (trigger and action) per Sensor. These topics will not be created automatically unless the Kafka auto.create.topics.enable cluster configuration is set to true, otherwise it is your responsibility to create these topics. If a topic does not exist and cannot be automatically created, the EventSource and/or Sensor will exit with an error. If you want to take advantage of the horizontal scaling enabled by the Kafka EventBus be sure to create topics with more than one partition. By default the topics are named as follows: event topic {namespace}-{eventbus-name} , trigger topic {namespace}-{eventbus-name}-{sensor-name}-trigger , action topic {namespace}-{eventbus-name}-{sensor-name}-action . If a topic name is specified in the EventBus specification, then the topics are named as follows: event topic {spec.kafka.topic} , trigger topic {spec.kafka.topic}-{sensor-name}-trigger , action topic {spec.kafka.topic}-{sensor-name}-action . Horizontal Scaling and Leader Election \u00b6 Sensors that use a Kafka EventBus can scale horizontally. Specifying replicas greater than one will result in all Sensor pods actively processing events. However, an EventSource that uses a Kafka EventBus cannot necessarily be horizontally scaled in an active-active manner, see EventSource HA for more details. In an active-passive scenario a Kubernetes leader election is used.","title":"Kafka"},{"location":"eventbus/kafka/#example","text":"kind : EventBus metadata : name : default spec : kafka : url : kafka:9092 # must be managed independently topic : \"example\" # optional See here for the full specification.","title":"Example"},{"location":"eventbus/kafka/#properties","text":"","title":"Properties"},{"location":"eventbus/kafka/#url","text":"Comma-separated list of kafka broker urls; the kafka brokers must be managed independently of Argo Events.","title":"url"},{"location":"eventbus/kafka/#topic","text":"The topic name, defaults to {namespace-name}-{eventbus-name} . Two additional topics per Sensor are also required, see topics below for more information.","title":"topic"},{"location":"eventbus/kafka/#version","text":"Kafka version; in most circumstances we recommend not setting this field manually. Defaults to the oldest supported stable version.","title":"version"},{"location":"eventbus/kafka/#tls","text":"Enables TLS on the kafka connection. tls: caCertSecret: name: my-secret key: ca-cert-key clientCertSecret: name: my-secret key: client-cert-key clientKeySecret: name: my-secret key: client-key-key","title":"tls"},{"location":"eventbus/kafka/#sasl","text":"Enables SASL authentication on the kafka connection. 
sasl: mechanism: PLAIN passwordSecret: key: password name: my-user userSecret: key: user name: my-user","title":"sasl"},{"location":"eventbus/kafka/#consumergroupgroupname","text":"Consumer group name, defaults to {namespace-name}-{sensor-name} .","title":"consumerGroup.groupName"},{"location":"eventbus/kafka/#consumergrouprebalancestrategy","text":"The kafka rebalance strategy; can be one of: sticky, roundrobin, range. Defaults to range.","title":"consumerGroup.rebalanceStrategy"},{"location":"eventbus/kafka/#consumergroupstartoldest","text":"When starting up a new group, whether to start from the oldest event (true) or the newest event (false). Defaults to false.","title":"consumerGroup.startOldest"},{"location":"eventbus/kafka/#security","text":"You can enable TLS or SASL authentication, see above for configuration details. You must enable these features in your Kafka Cluster and make the certificates/credentials available in a Kubernetes secret.","title":"Security"},{"location":"eventbus/kafka/#topics","text":"The Kafka EventBus requires one event topic and two additional topics (trigger and action) per Sensor. These topics will not be created automatically unless the Kafka auto.create.topics.enable cluster configuration is set to true, otherwise it is your responsibility to create these topics. If a topic does not exist and cannot be automatically created, the EventSource and/or Sensor will exit with an error. If you want to take advantage of the horizontal scaling enabled by the Kafka EventBus be sure to create topics with more than one partition. By default the topics are named as follows: event topic {namespace}-{eventbus-name} , trigger topic {namespace}-{eventbus-name}-{sensor-name}-trigger , action topic {namespace}-{eventbus-name}-{sensor-name}-action . If a topic name is specified in the EventBus specification, then the topics are named as follows: event topic {spec.kafka.topic} , trigger topic {spec.kafka.topic}-{sensor-name}-trigger , action topic {spec.kafka.topic}-{sensor-name}-action .","title":"Topics"},{"location":"eventbus/kafka/#horizontal-scaling-and-leader-election","text":"Sensors that use a Kafka EventBus can scale horizontally. Specifying replicas greater than one will result in all Sensor pods actively processing events. However, an EventSource that uses a Kafka EventBus cannot necessarily be horizontally scaled in an active-active manner, see EventSource HA for more details. In an active-passive scenario a Kubernetes leader election is used.","title":"Horizontal Scaling and Leader Election"},{"location":"eventbus/stan/","text":"NATS Streaming \u00b6 You can create a native NATS EventBus, or connect to an existing NATS Streaming service with an exotic NATS EventBus. Native \u00b6 The simplest native NATS EventBus example: apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : nats : native : {} The example above brings up a NATS Streaming StatefulSet with 3 replicas in the namespace. The following example shows an EventBus with token auth strategy and persistent volumes. apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : nats : native : replicas : 3 # optional, defaults to 3, and requires a minimum of 3 auth : token # optional, defaults to none persistence : # optional storageClassName : standard accessMode : ReadWriteOnce volumeSize : 10Gi Properties \u00b6 Check here for the full spec of native . replicas - StatefulSet replicas, defaults to 3, and requires a minimum of 3. 
According to the NATS Streaming doc , the size should probably be limited to 3 to 5, and an odd number is recommended. auth - The strategy clients use to connect to the NATS Streaming service; none or token is currently supported, defaulting to none . If the token strategy is used, the system will generate a token and store it in K8s secrets (one for the client, one for the server); EventSource and Sensor PODs will automatically load the client secret and use it to connect to the EventBus. antiAffinity - Whether to create the StatefulSet PODs with an anti-affinity rule. Deprecated in v1.3 , will be removed in v1.5 , use affinity instead. nodeSelector - Node selector for StatefulSet PODs. tolerations - Tolerations for the PODs. persistence - Whether to use a persistent volume for the data. securityContext - POD level security attributes and common container settings. maxAge - Max age of existing messages, e.g. 72h , 4h35m , defaults to 72h . maxMsgs - Max number of messages before expiring the oldest messages, 0 means unlimited. Defaults to 1000000. maxBytes - Total size of messages before expiring the oldest messages, 0 means unlimited. Defaults to 1GB. maxSubs - Maximum number of subscriptions, 0 means unlimited. Defaults to 1000. maxPayload - Maximum number of bytes in a message payload, 0 means unlimited. Defaults to 1MB. imagePullSecrets - Secrets used to pull images. serviceAccountName - In case your organization requires a service account other than default . priority - Priority of the StatefulSet PODs. priorityClassName - PriorityClassName of the StatefulSet PODs. affinity - Affinity settings for the StatefulSet PODs. More About Native NATS EventBus \u00b6 The message limit per channel defaults to 1,000,000. It can be customized by setting spec.nats.native.maxMsgs ; 0 means unlimited. Message bytes per channel default to 1GB ; set spec.nats.native.maxBytes to customize it, where \"0\" means unlimited. The max age of messages is 72 hours, which means messages older than 72 hours will be deleted automatically. It can be customized by setting spec.nats.native.maxAge , e.g. 240h . The max subscription number defaults to 1000 ; it can be customized by setting spec.nats.native.maxSubs . Exotic \u00b6 To use an existing NATS Streaming service, follow the example below. apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : nats : exotic : url : nats://xxxxx:xxx clusterID : cluster-id auth : token accessSecret : name : my-secret-name key : secret-key More Information \u00b6 To view a finalized EventBus config: kubectl get eventbus default -o json | jq '.status.config' A sample result: { \"nats\" : { \"accessSecret\" : { \"key\" : \"client-auth\" , \"name\" : \"eventbus-default-client\" }, \"auth\" : \"token\" , \"clusterID\" : \"eventbus-default\" , \"url\" : \"nats://eventbus-default-stan-svc:4222\" } } All the events in a namespace are published to the same channel/subject/topic named eventbus-{namespace} in the EventBus.","title":"Stan"},{"location":"eventbus/stan/#nats-streaming","text":"You can create a native NATS EventBus, or connect to an existing NATS Streaming service with an exotic NATS EventBus.","title":"NATS Streaming"},{"location":"eventbus/stan/#native","text":"The simplest native NATS EventBus example: apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : nats : native : {} The example above brings up a NATS Streaming StatefulSet with 3 replicas in the namespace. The following example shows an EventBus with token auth strategy and persistent volumes. 
apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : nats : native : replicas : 3 # optional, defaults to 3, and requires a minimum of 3 auth : token # optional, defaults to none persistence : # optional storageClassName : standard accessMode : ReadWriteOnce volumeSize : 10Gi","title":"Native"},{"location":"eventbus/stan/#properties","text":"Check here for the full spec of native . replicas - StatefulSet replicas, defaults to 3, and requires a minimum of 3. According to the NATS Streaming doc , the size should probably be limited to 3 to 5, and an odd number is recommended. auth - The strategy clients use to connect to the NATS Streaming service; none or token is currently supported, defaulting to none . If the token strategy is used, the system will generate a token and store it in K8s secrets (one for the client, one for the server); EventSource and Sensor PODs will automatically load the client secret and use it to connect to the EventBus. antiAffinity - Whether to create the StatefulSet PODs with an anti-affinity rule. Deprecated in v1.3 , will be removed in v1.5 , use affinity instead. nodeSelector - Node selector for StatefulSet PODs. tolerations - Tolerations for the PODs. persistence - Whether to use a persistent volume for the data. securityContext - POD level security attributes and common container settings. maxAge - Max age of existing messages, e.g. 72h , 4h35m , defaults to 72h . maxMsgs - Max number of messages before expiring the oldest messages, 0 means unlimited. Defaults to 1000000. maxBytes - Total size of messages before expiring the oldest messages, 0 means unlimited. Defaults to 1GB. maxSubs - Maximum number of subscriptions, 0 means unlimited. Defaults to 1000. maxPayload - Maximum number of bytes in a message payload, 0 means unlimited. Defaults to 1MB. imagePullSecrets - Secrets used to pull images. serviceAccountName - In case your organization requires a service account other than default . priority - Priority of the StatefulSet PODs. priorityClassName - PriorityClassName of the StatefulSet PODs. affinity - Affinity settings for the StatefulSet PODs.","title":"Properties"},{"location":"eventbus/stan/#more-about-native-nats-eventbus","text":"The message limit per channel defaults to 1,000,000. It can be customized by setting spec.nats.native.maxMsgs ; 0 means unlimited. Message bytes per channel default to 1GB ; set spec.nats.native.maxBytes to customize it, where \"0\" means unlimited. The max age of messages is 72 hours, which means messages older than 72 hours will be deleted automatically. It can be customized by setting spec.nats.native.maxAge , e.g. 240h . The max subscription number defaults to 1000 ; it can be customized by setting spec.nats.native.maxSubs .","title":"More About Native NATS EventBus"},{"location":"eventbus/stan/#exotic","text":"To use an existing NATS Streaming service, follow the example below. 
apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : nats : exotic : url : nats://xxxxx:xxx clusterID : cluster-id auth : token accessSecret : name : my-secret-name key : secret-key","title":"Exotic"},{"location":"eventbus/stan/#more-information","text":"To view a finalized EventBus config: kubectl get eventbus default -o json | jq '.status.config' A sample result: { \"nats\" : { \"accessSecret\" : { \"key\" : \"client-auth\" , \"name\" : \"eventbus-default-client\" }, \"auth\" : \"token\" , \"clusterID\" : \"eventbus-default\" , \"url\" : \"nats://eventbus-default-stan-svc:4222\" } } All the events in a namespace are published to the same channel/subject/topic named eventbus-{namespace} in the EventBus.","title":"More Information"},{"location":"eventsources/calendar-catch-up/","text":"Calendar EventSource Catch Up \u00b6 The catch-up feature allows Calendar eventsources to execute the schedules missed since the last run. Enable Catch-up for Calendar EventSource \u00b6 Users can configure catch-up for each event in an eventsource. apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : calendar spec : template : serviceAccountName : configmap-sa # assign a service account with read, write permissions on configmaps calendar : example-with-catch-up : # Catch up on the events missed since the last event timestamp. The last event will be persisted in a configmap. 
schedule : \"* * * * *\" persistence : catchup : enabled : true # Check missed schedules from last persisted event time on every start maxDuration : 5m # maximum duration to go back for the catch-up configMap : # ConfigMap to persist the last successful event timestamp createIfNotExist : true name : test-configmap The last calendar event is persisted in the configured configmap. The same configmap can be used by multiple event configurations. data : calendar.example-with-catch-up : '{\"eventTime\":\"2020-10-19 22:50:00.0003192 +0000 UTC m=+683.567066901\"}' Service Account \u00b6 To make Calendar EventSource catch-up work, a Service Account with proper RBAC settings needs to be provided. If the configMap does not exist and createIfNotExist: true is set, a Service Account bound to the following Role is required. apiVersion : rbac.authorization.k8s.io/v1 kind : Role metadata : name : example-configmap-access-role rules : - apiGroups : - \"\" resources : - configmaps verbs : - get - create - update If the configmap already exists, create can be removed from the verbs list. Disable the catchup \u00b6 Set the catchup.enabled element to false : catchup : enabled : false","title":"Calendar EventSource Catch Up"},{"location":"eventsources/calendar-catch-up/#calender-eventsource-catch-up","text":"The catch-up feature allows Calendar eventsources to execute the schedules missed since the last run.","title":"Calendar EventSource Catch Up"},{"location":"eventsources/calendar-catch-up/#enable-catch-up-for-calendar-eventsource","text":"Users can configure catch-up for each event in an eventsource. apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : calendar spec : template : serviceAccountName : configmap-sa # assign a service account with read, write permissions on configmaps calendar : example-with-catch-up : # Catch up on the events missed since the last event timestamp. The last event will be persisted in a configmap. schedule : \"* * * * *\" persistence : catchup : enabled : true # Check missed schedules from last persisted event time on every start maxDuration : 5m # maximum duration to go back for the catch-up configMap : # ConfigMap to persist the last successful event timestamp createIfNotExist : true name : test-configmap The last calendar event is persisted in the configured configmap. The same configmap can be used by multiple event configurations. data : calendar.example-with-catch-up : '{\"eventTime\":\"2020-10-19 22:50:00.0003192 +0000 UTC m=+683.567066901\"}'","title":"Enable Catch-up for Calendar EventSource"},{"location":"eventsources/calendar-catch-up/#service-account","text":"To make Calendar EventSource catch-up work, a Service Account with proper RBAC settings needs to be provided. If the configMap does not exist and createIfNotExist: true is set, a Service Account bound to the following Role is required. apiVersion : rbac.authorization.k8s.io/v1 kind : Role metadata : name : example-configmap-access-role rules : - apiGroups : - \"\" resources : - configmaps verbs : - get - create - update If the configmap already exists, create can be removed from the verbs list.","title":"Service Account"},{"location":"eventsources/calendar-catch-up/#disable-the-catchup","text":"Set the catchup.enabled element to false : catchup : enabled : false","title":"Disable the catchup"},{"location":"eventsources/filtering/","text":"Filtering EventSources \u00b6 When event sources watch events from external data sources (e.g. Kafka topics), they will ingest all messages. With filtering, we are able to apply constraints and determine if the event should be published or skipped. This is achieved by evaluating an expression in the EventSource spec. Fields \u00b6 A filter in an example Kafka EventSource: apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : kafka spec : kafka : example : url : kafka.argo-events:9092 topic : topic-2 jsonBody : true partition : \"1\" filter : # filter field expression : \"(body.id == 4) && (body.name != 'Joe')\" #expression to be evaluated connectionBackoff : duration : 10s steps : 5 factor : 2 jitter : 0.2 The expression string is evaluated with the expr package which offers a wide set of basic operators and comparators. 
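As an illustration, a hedged sketch of a filter stanza using nested field access (this expression is not from the original docs; it assumes the message body contains the department object used in the example below, and it slots into the filter field of the Kafka EventSource above):

filter:
  # publish only events whose nested department/bu id equals 2 and whose name is John
  expression: "(body.department.bu.id == 2) && (body.name == 'John')"
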
Example \u00b6 Creating a Kafka EventSource with filter field present kubectl apply -f examples/event-sources/kafka.yaml -n argo-events Sending an event with passing filter conditions to kafka echo '{\"id\": 4,\"name\": \"John\", \"email\": \"john@intuit.com\", \"department\":{\"id\": 1,\"name\": \"HR\",\"bu\":{\"id\": 2,\"name\" : \"devp\"}}}' | kcat -b localhost:9092 -P -t topic-2 Sending an event with failing filter conditions echo '{\"id\": 2,\"name\": \"Johnson\", \"email\": \"john@intuit.com\", \"department\":{\"id\": 1,\"name\": \"HR\",\"bu\":{\"id\": 2,\"name\" : \"devp\"}}}' | kcat -b localhost:9092 -P -t topic-2 Output \u00b6 Successful logs from kafka event source pod: {\"level\":\"info\",\"ts\":1644017495.0711913,\"logger\":\"argo-events.eventsource\",\"caller\":\"kafka/start.go:217\",\"msg\":\"dispatching event on the data channel...\",\"eventSourceName\":\"kafka\",\"eventSourceType\":\"kafka\",\"eventName\":\"example\",\"partition-id\":\"0\"} {\"level\":\"info\",\"ts\":1644017495.1374986,\"logger\":\"argo-events.eventsource\",\"caller\":\"eventsources/eventing.go:514\",\"msg\":\"succeeded to publish an event\",\"eventSourceName\":\"kafka\",\"eventName\":\"example\",\"eventSourceType\":\"kafka\",\"eventID\":\"kafka:example:kafka-broker:9092:topic-2:0:7\"}","title":"Filtering EventSources"},{"location":"eventsources/filtering/#filtering-eventsources","text":"When event sources watch events from external data sources (ie. Kafka topics), it will ingest all messages. With filtering, we are able to apply constraints and determine if the event should be published or skipped. This is achieved by evaluating an expression in the EventSource spec.","title":"Filtering EventSources"},{"location":"eventsources/filtering/#fields","text":"A filter in an example Kafka EventSource: apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : kafka spec : kafka : example : url : kafka.argo-events:9092 topic : topic-2 jsonBody : true partition : \"1\" filter : # filter field expression : \"(body.id == 4) && (body.name != 'Joe')\" #expression to be evaluated connectionBackoff : duration : 10s steps : 5 factor : 2 jitter : 0.2 The expression string is evaluated with the expr package which offers a wide set of basic operators and comparators.","title":"Fields"},{"location":"eventsources/filtering/#example","text":"Creating a Kafka EventSource with filter field present kubectl apply -f examples/event-sources/kafka.yaml -n argo-events Sending an event with passing filter conditions to kafka echo '{\"id\": 4,\"name\": \"John\", \"email\": \"john@intuit.com\", \"department\":{\"id\": 1,\"name\": \"HR\",\"bu\":{\"id\": 2,\"name\" : \"devp\"}}}' | kcat -b localhost:9092 -P -t topic-2 Sending an event with failing filter conditions echo '{\"id\": 2,\"name\": \"Johnson\", \"email\": \"john@intuit.com\", \"department\":{\"id\": 1,\"name\": \"HR\",\"bu\":{\"id\": 2,\"name\" : \"devp\"}}}' | kcat -b localhost:9092 -P -t topic-2","title":"Example"},{"location":"eventsources/filtering/#output","text":"Successful logs from kafka event source pod: {\"level\":\"info\",\"ts\":1644017495.0711913,\"logger\":\"argo-events.eventsource\",\"caller\":\"kafka/start.go:217\",\"msg\":\"dispatching event on the data channel...\",\"eventSourceName\":\"kafka\",\"eventSourceType\":\"kafka\",\"eventName\":\"example\",\"partition-id\":\"0\"} {\"level\":\"info\",\"ts\":1644017495.1374986,\"logger\":\"argo-events.eventsource\",\"caller\":\"eventsources/eventing.go:514\",\"msg\":\"succeeded to publish an 
event\",\"eventSourceName\":\"kafka\",\"eventName\":\"example\",\"eventSourceType\":\"kafka\",\"eventID\":\"kafka:example:kafka-broker:9092:topic-2:0:7\"}","title":"Output"},{"location":"eventsources/gcp-pubsub/","text":"GCP PubSub \u00b6 Topic And Subscription ID \u00b6 GCP PubSub event source can listen to a PubSub with given topic , or subscriptionID . Here is the logic with different topic and subscriptionID combination. Topic Provided/Existing Sub ID Provided/Existing Actions Yes/Yes Yes/Yes Validate if given topic matches subscription's topic Yes/Yes Yes/No Create a subscription with given ID Yes/Yes No/- Create or re-use subscription with auto generated subID Yes/No Yes/No Create a topic and a subscription with given subID Yes/No Yes/Yes Invalid Yes/No No/- Create a topic, create or re-use subscription w/ auto generated subID No/- Yes/Yes OK No/- Yes/No Invalid Workload Identity \u00b6 If you have configured Workload Identity and want to use it for a PubSub EventSource, leave credentialSecret nil. Full spec is available here . See a PubSub EventSource example . Running With PubSub Emulator \u00b6 You can point this event source at the PubSub Emulator by configuring the PUBSUB_EMULATOR_HOST environment variable for the event source pod. This can be configured on the EventSource resource under the spec.template.container.env key. This option is also documented in the PubSub EventSource example .","title":"GCP PubSub"},{"location":"eventsources/gcp-pubsub/#gcp-pubsub","text":"","title":"GCP PubSub"},{"location":"eventsources/gcp-pubsub/#topic-and-subscription-id","text":"GCP PubSub event source can listen to a PubSub with given topic , or subscriptionID . Here is the logic with different topic and subscriptionID combination. Topic Provided/Existing Sub ID Provided/Existing Actions Yes/Yes Yes/Yes Validate if given topic matches subscription's topic Yes/Yes Yes/No Create a subscription with given ID Yes/Yes No/- Create or re-use subscription with auto generated subID Yes/No Yes/No Create a topic and a subscription with given subID Yes/No Yes/Yes Invalid Yes/No No/- Create a topic, create or re-use subscription w/ auto generated subID No/- Yes/Yes OK No/- Yes/No Invalid","title":"Topic And Subscription ID"},{"location":"eventsources/gcp-pubsub/#workload-identity","text":"If you have configured Workload Identity and want to use it for a PubSub EventSource, leave credentialSecret nil. Full spec is available here . See a PubSub EventSource example .","title":"Workload Identity"},{"location":"eventsources/gcp-pubsub/#running-with-pubsub-emulator","text":"You can point this event source at the PubSub Emulator by configuring the PUBSUB_EMULATOR_HOST environment variable for the event source pod. This can be configured on the EventSource resource under the spec.template.container.env key. This option is also documented in the PubSub EventSource example .","title":"Running With PubSub Emulator"},{"location":"eventsources/generic/","text":"Generic EventSource \u00b6 Generic eventsource extends Argo-Events eventsources via a simple gRPC contract. This is specifically useful when you want to onboard a custom eventsource implementation. Contract \u00b6 In order to qualify as generic eventsource, the eventsource server needs to implement following gRPC contract. syntax = \"proto3\" ; package generic ; service Eventing { rpc StartEventSource ( EventSource ) returns ( stream Event ); } message EventSource { // The event source name . string name = 1 ; // The event source configuration value . 
bytes config = 2 ; } /** * Represents an event */ message Event { // The event source name . string name = 1 ; // The event payload . bytes payload = 2 ; } The proto file is available here . Architecture \u00b6 Consider a generic eventsource. apiVersion: argoproj.io/v1alpha1 kind: EventSource metadata: name: generic spec: generic: example: insecure: true url: \"generic-event-source-server.argo-events.svc:8080\" config: |- key1: value1 key2: value2 The values placed under config field follows a free-form style and Argo-Events eventsource client is not opinionated about them. Although, it is expected that the eventsource server implemented by the user is able to parse the configuration. Flow \u00b6 The eventsource client connects to the server via the url defined under eventsource spec and sends over the configuration defined under config over an RPC call. The eventsource server then parses the configuration and connects to any external source if required to consume the events. The eventsource server can produce events without connecting to any external source, e.g. a special implementation of calendar events. The events from eventsource server are streamed back to the client. Client then writes the events to the eventbus which are read by the sensor to trigger the workflows. Connection Strategy \u00b6 The eventsource client performs indefinite retries to connect to the eventsource server and receives events over a stream upon successful connection. This also applies when the eventsource server goes down.","title":"Generic EventSource"},{"location":"eventsources/generic/#generic-eventsource","text":"Generic eventsource extends Argo-Events eventsources via a simple gRPC contract. This is specifically useful when you want to onboard a custom eventsource implementation.","title":"Generic EventSource"},{"location":"eventsources/generic/#contract","text":"In order to qualify as generic eventsource, the eventsource server needs to implement following gRPC contract. syntax = \"proto3\" ; package generic ; service Eventing { rpc StartEventSource ( EventSource ) returns ( stream Event ); } message EventSource { // The event source name . string name = 1 ; // The event source configuration value . bytes config = 2 ; } /** * Represents an event */ message Event { // The event source name . string name = 1 ; // The event payload . bytes payload = 2 ; } The proto file is available here .","title":"Contract"},{"location":"eventsources/generic/#architecture","text":"Consider a generic eventsource. apiVersion: argoproj.io/v1alpha1 kind: EventSource metadata: name: generic spec: generic: example: insecure: true url: \"generic-event-source-server.argo-events.svc:8080\" config: |- key1: value1 key2: value2 The values placed under config field follows a free-form style and Argo-Events eventsource client is not opinionated about them. Although, it is expected that the eventsource server implemented by the user is able to parse the configuration.","title":"Architecture"},{"location":"eventsources/generic/#flow","text":"The eventsource client connects to the server via the url defined under eventsource spec and sends over the configuration defined under config over an RPC call. The eventsource server then parses the configuration and connects to any external source if required to consume the events. The eventsource server can produce events without connecting to any external source, e.g. a special implementation of calendar events. The events from eventsource server are streamed back to the client. 
Client then writes the events to the eventbus which are read by the sensor to trigger the workflows.","title":"Flow"},{"location":"eventsources/generic/#connection-strategy","text":"The eventsource client performs indefinite retries to connect to the eventsource server and receives events over a stream upon successful connection. This also applies when the eventsource server goes down.","title":"Connection Strategy"},{"location":"eventsources/ha/","text":"EventSource High Availability \u00b6 EventSource controller creates a k8s deployment (replica number defaults to 1) for each EventSource object to watch the events. HA can be achieved by setting spec.replicas to a number greater than 1. Some types of the event sources do not allow multiple live clients with same attributes (i.e. multiple clients with same clientID connecting to a NATS server), or multiple event source PODs will generate duplicated events to downstream, so the HA strategies are different for different event sources. Please DO NOT manually scale up the replicas, that might cause unexpected behaviors! Active-Active \u00b6 Active-Active strategy is applied to the following EventSource types. AWS SNS AWS SQS Bitbucket Bitbucket Server GitHub GitLab NetApp Storage GRID Slack Stripe Webhook When spec.replicas is set to N (N > 1), all the N Pods serve traffic. Active-Passive \u00b6 If following EventSource types have spec.replicas > 1 , Active-Passive strategy is used, which means only one Pod serves traffic and the rest ones stand by. One of standby Pods will be automatically elected to be active if the old one is gone. AMQP Azure Events Hub Calendar Emitter GCP PubSub Generic File HDFS Kafka Minio MQTT NATS NSQ Pulsar Redis Resource Kubernetes Leader Election \u00b6 By default, Argo Events will use NATS for the HA leader election except when using a Kafka Eventbus, in which case a kubernetes leader election will be used. If using a different EventBus you can opt-in to a Kubernetes native leader election by specifying the following annotation. annotations : events.argoproj.io/leader-election : k8s To use Kubernetes leader election the following RBAC rules need to be associated with the EventSource ServiceAccount. apiVersion : rbac.authorization.k8s.io/v1 kind : Role metadata : name : argo-events-leaderelection-role rules : - apiGroups : [ \"coordination.k8s.io\" ] resources : [ \"leases\" ] verbs : [ \"get\" , \"create\" , \"update\" ] More \u00b6 Click here to learn more information about Argo Events DR/HA recommendations.","title":"EventSource High Availability"},{"location":"eventsources/ha/#eventsource-high-availability","text":"EventSource controller creates a k8s deployment (replica number defaults to 1) for each EventSource object to watch the events. HA can be achieved by setting spec.replicas to a number greater than 1. Some types of the event sources do not allow multiple live clients with same attributes (i.e. multiple clients with same clientID connecting to a NATS server), or multiple event source PODs will generate duplicated events to downstream, so the HA strategies are different for different event sources. Please DO NOT manually scale up the replicas, that might cause unexpected behaviors!","title":"EventSource High Availability"},{"location":"eventsources/ha/#active-active","text":"Active-Active strategy is applied to the following EventSource types. 
AWS SNS AWS SQS Bitbucket Bitbucket Server GitHub GitLab NetApp Storage GRID Slack Stripe Webhook When spec.replicas is set to N (N > 1), all the N Pods serve traffic.","title":"Active-Active"},{"location":"eventsources/ha/#active-passive","text":"If the following EventSource types have spec.replicas > 1 , the Active-Passive strategy is used, which means only one Pod serves traffic and the rest stand by. One of the standby Pods will be automatically elected to become active if the old one is gone. AMQP Azure Events Hub Calendar Emitter GCP PubSub Generic File HDFS Kafka Minio MQTT NATS NSQ Pulsar Redis Resource","title":"Active-Passive"},{"location":"eventsources/ha/#kubernetes-leader-election","text":"By default, Argo Events will use NATS for the HA leader election except when using a Kafka EventBus, in which case a Kubernetes leader election will be used. If using a different EventBus you can opt in to a Kubernetes native leader election by specifying the following annotation. annotations : events.argoproj.io/leader-election : k8s To use Kubernetes leader election the following RBAC rules need to be associated with the EventSource ServiceAccount. apiVersion : rbac.authorization.k8s.io/v1 kind : Role metadata : name : argo-events-leaderelection-role rules : - apiGroups : [ \"coordination.k8s.io\" ] resources : [ \"leases\" ] verbs : [ \"get\" , \"create\" , \"update\" ]","title":"Kubernetes Leader Election"},{"location":"eventsources/ha/#more","text":"Click here to learn more about Argo Events DR/HA recommendations.","title":"More"},{"location":"eventsources/multiple-events/","text":"EventSource With Multiple Events \u00b6 v0.17.0 and after Multiple events can be configured in a single EventSource; they can be either one event source type, or mixed event source types with some limitations. Single EventSource Type \u00b6 A single type EventSource configuration: apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook spec : webhook : example : port : \"12000\" endpoint : /example method : POST example-foo : port : \"13000\" endpoint : /example2 method : POST For the example above, there are 2 events configured in the EventSource named webhook . Mixed EventSource Types \u00b6 EventSource is allowed to have mixed types of events configured. apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : mixed-sources spec : webhook : webhook-example : # eventName port : \"12000\" endpoint : /example method : POST sns : sns-example : # eventName topicArn : arn:aws:sns:us-east-1:XXXXXXXX:test webhook : endpoint : \"/\" port : \"15000\" accessKey : key : my-key name : my-name secretKey : key : my-secret-key name : my-secret-name region : us-east-1 However, there are some rules to follow when doing so: EventSource types with the Active-Active HA strategy cannot be mixed with types with the Active-Passive strategy; for EventSource types, see EventSource High Availability for details. Event Name (i.e. webhook-example and sns-example above, refer to EventSource Names ) needs to be unique in the EventSource; the same eventName is not allowed even if the events are in different event source types. The reason for that is that eventSourceName and eventName are used as the dependency attributes in a Sensor, as shown in the sketch below. 
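To make the dependency attributes concrete, here is a sketch (not from the original docs) of Sensor dependencies referencing the two events of the mixed EventSource above; triggers are omitted for brevity:

apiVersion: argoproj.io/v1alpha1
kind: Sensor
metadata:
  name: mixed-sensor   # hypothetical name
spec:
  dependencies:
    - name: webhook-dep
      eventSourceName: mixed-sources   # name of the EventSource object
      eventName: webhook-example       # map key of the configured event
    - name: sns-dep
      eventSourceName: mixed-sources
      eventName: sns-example
  # triggers: ... (a real Sensor also needs at least one trigger)
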
","title":"EventSource With Multiple Events"},{"location":"eventsources/multiple-events/#eventsource-with-multiple-events","text":"v0.17.0 and after Multiple events can be configured in a single EventSource; they can be either one event source type, or mixed event source types with some limitations.","title":"EventSource With Multiple Events"},{"location":"eventsources/multiple-events/#single-eventsource-type","text":"A single type EventSource configuration: apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook spec : webhook : example : port : \"12000\" endpoint : /example method : POST example-foo : port : \"13000\" endpoint : /example2 method : POST For the example above, there are 2 events configured in the EventSource named webhook .","title":"Single EventSource Type"},{"location":"eventsources/multiple-events/#mixed-eventsource-types","text":"EventSource is allowed to have mixed types of events configured. apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : mixed-sources spec : webhook : webhook-example : # eventName port : \"12000\" endpoint : /example method : POST sns : sns-example : # eventName topicArn : arn:aws:sns:us-east-1:XXXXXXXX:test webhook : endpoint : \"/\" port : \"15000\" accessKey : key : my-key name : my-name secretKey : key : my-secret-key name : my-secret-name region : us-east-1 However, there are some rules to follow when doing so: EventSource types with the Active-Active HA strategy cannot be mixed with types with the Active-Passive strategy; for EventSource types, see EventSource High Availability for details. Event Name (i.e. webhook-example and sns-example above, refer to EventSource Names ) needs to be unique in the EventSource; the same eventName is not allowed even if the events are in different event source types. The reason for that is that eventSourceName and eventName are used as the dependency attributes in a Sensor.","title":"Mixed EventSource Types"},{"location":"eventsources/naming/","text":"EventSource Names \u00b6 In a Sensor object, a dependency is defined as: dependencies : - name : test-dep eventSourceName : webhook-example eventName : example The eventSourceName and eventName might be confusing. Take the following EventSource example; the eventSourceName and eventName are described below. 
apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook-example # eventSourceName spec : webhook : example : # eventName port : \"12000\" endpoint : /example method : POST example-foo : # eventName port : \"13000\" endpoint : /example2 method : POST","title":"EventSource Names"},{"location":"eventsources/naming/#eventsourcename","text":"eventSourceName is the name of the dependent EventSource object, i.e. webhook-example in the example above.","title":"EventSourceName"},{"location":"eventsources/naming/#eventname","text":"eventName is the map key of a configured event. In the example above, eventName could be example or example-foo .","title":"EventName"},{"location":"eventsources/services/","text":"EventSource Services \u00b6 Some of the EventSources ( webhook , github , gitlab , sns , slack , Storage GRID and stripe ) start an HTTP service to receive the events. For your convenience, there is a field named service within the EventSource spec that can help you create a ClusterIP service for testing. For example: apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook spec : service : ports : - port : 12000 targetPort : 12000 webhook : example : port : \"12000\" endpoint : /example method : POST However, the generated service is ONLY for testing purposes; if you want to expose the endpoint for external access, please manage it using native K8s objects (i.e. a Load Balancer type Service, or an Ingress), and remove the service field from the EventSource object. For example, you can create a K8s service with the selector eventsource-name: webhook to select pods created for the \"webhook\" event source, like the following: apiVersion : v1 kind : Service metadata : name : webhook-eventsource spec : ports : - port : 12000 protocol : TCP targetPort : 12000 selector : eventsource-name : webhook type : NodePort Then you can expose the service for external access using native K8s objects as mentioned above. 
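For instance, a sketch of an Ingress routing external traffic to the webhook-eventsource Service defined above (the hostname and ingress class are assumptions, not part of the original docs):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: webhook-eventsource-ingress   # hypothetical name
spec:
  ingressClassName: nginx             # assumption: an NGINX ingress controller is installed
  rules:
    - host: webhook.example.com       # hypothetical hostname
      http:
        paths:
          - path: /example
            pathType: Prefix
            backend:
              service:
                name: webhook-eventsource
                port:
                  number: 12000
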
You can refer to webhook health check if you need a health check endpoint for LB Service or Ingress configuration.","title":"EventSource Services"},{"location":"eventsources/services/#eventsource-services","text":"Some of the EventSources ( webhook , github , gitlab , sns , slack , Storage GRID and stripe ) start an HTTP service to receive the events. For your convenience, there is a field named service within the EventSource spec that can help you create a ClusterIP service for testing. For example: apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook spec : service : ports : - port : 12000 targetPort : 12000 webhook : example : port : \"12000\" endpoint : /example method : POST However, the generated service is ONLY for testing purposes; if you want to expose the endpoint for external access, please manage it using native K8s objects (i.e. a Load Balancer type Service, or an Ingress), and remove the service field from the EventSource object. For example, you can create a K8s service with the selector eventsource-name: webhook to select pods created for the \"webhook\" event source, like the following: apiVersion : v1 kind : Service metadata : name : webhook-eventsource spec : ports : - port : 12000 protocol : TCP targetPort : 12000 selector : eventsource-name : webhook type : NodePort Then you can expose the service for external access using native K8s objects as mentioned above. You can refer to webhook health check if you need a health check endpoint for LB Service or Ingress configuration.","title":"EventSource Services"},{"location":"eventsources/webhook-authentication/","text":"Webhook Authentication \u00b6 v1.0 and after For the webhook event source, if you want to protect your endpoint from unauthorized access, you can specify authSecret in the spec, which is a K8s secret key selector. This simple authentication approach also works for webhook extended event sources, if that event source does not have a built-in authenticator. Firstly, create a k8s secret containing your token. echo -n 'af3qqs321f2ddwf1e2e67dfda3fs' > ./token.txt kubectl create secret generic my-webhook-token --from-file = my-token = ./token.txt Then add authSecret to your webhook EventSource. apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook spec : webhook : example : port : \"12000\" endpoint : /example method : POST authSecret : name : my-webhook-token key : my-token Now you can authenticate your webhook endpoint with the configured token. TOKEN = \"Bearer af3qqs321f2ddwf1e2e67dfda3fs\" curl -X POST -H \"Authorization: $TOKEN \" -d \"{your data}\" http://xxxxx:12000/example","title":"Webhook Authentication"},{"location":"eventsources/webhook-authentication/#webhook-authentication","text":"v1.0 and after For the webhook event source, if you want to protect your endpoint from unauthorized access, you can specify authSecret in the spec, which is a K8s secret key selector. This simple authentication approach also works for webhook extended event sources, if that event source does not have a built-in authenticator. Firstly, create a k8s secret containing your token. echo -n 'af3qqs321f2ddwf1e2e67dfda3fs' > ./token.txt kubectl create secret generic my-webhook-token --from-file = my-token = ./token.txt Then add authSecret to your webhook EventSource. apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook spec : webhook : example : port : \"12000\" endpoint : /example method : POST authSecret : name : my-webhook-token key : my-token Now you can authenticate your webhook endpoint with the configured token. TOKEN = \"Bearer af3qqs321f2ddwf1e2e67dfda3fs\" curl -X POST -H \"Authorization: $TOKEN \" -d \"{your data}\" http://xxxxx:12000/example","title":"Webhook Authentication"},{"location":"eventsources/webhook-health-check/","text":"Webhook Health Check \u00b6 For webhook or webhook extended event sources such as github , gitlab , sns , slack , Storage GRID and stripe , besides the endpoint configured in the spec, an extra endpoint :${port}/health will also be created; this is useful for LB or Ingress configuration for the event source, where usually a health check endpoint is required. For example, the following EventSource object will have 4 endpoints created, :12000/example1 , :12000/health , :13000/example2 and :13000/health . An HTTP GET request to the health endpoint returns a text OK with HTTP response code 200 . 
apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook spec : webhook : example : port : \"12000\" endpoint : /example1 method : POST example-foo : port : \"13000\" endpoint : /example2 method : POST","title":"Webhook Health Check"},{"location":"eventsources/webhook-health-check/#webhook-health-check","text":"For webhook or webhook extended event sources such as github , gitlab , sns , slack , Storage GRID and stripe , besides the endpoint configured in the spec, an extra endpoint :${port}/health will also be created, this is useful for LB or Ingress configuration for the event source, where usually a health check endpoint is required. For example, the following EventSource object will have 4 endpoints created, :12000/example1 , :12000/health , :13000/example2 and :13000/health . An HTTP GET request to the health endpoint returns a text OK with HTTP response code 200 . apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook spec : webhook : example : port : \"12000\" endpoint : /example1 method : POST example-foo : port : \"13000\" endpoint : /example2 method : POST","title":"Webhook Health Check"},{"location":"eventsources/setup/amqp/","text":"AMQP \u00b6 AMQP event-source listens to messages on the MQ and helps sensor trigger the workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"contentType\" : \"ContentType is the MIME content type\" , \"contentEncoding\" : \"ContentEncoding is the MIME content encoding\" , \"deliveryMode\" : \"Delivery mode can be either - non-persistent (1) or persistent (2)\" , \"priority\" : \"Priority refers to the use - 0 to 9\" , \"correlationId\" : \"CorrelationId is the correlation identifier\" , \"replyTo\" : \"ReplyTo is the address to reply to (ex: RPC)\" , \"expiration\" : \"Expiration refers to message expiration spec\" , \"messageId\" : \"MessageId is message identifier\" , \"timestamp\" : \"Timestamp refers to the message timestamp\" , \"type\" : \"Type refers to the message type name\" , \"appId\" : \"AppId refers to the application id\" , \"exchange\" : \"Exchange is basic.publish exchange\" , \"routingKey\" : \"RoutingKey is basic.publish routing key\" , \"body\" : \"Body represents the message body\" , } } Setup \u00b6 Lets set up RabbitMQ locally. apiVersion : v1 kind : Service metadata : labels : component : rabbitmq name : rabbitmq - service spec : ports : - port : 5672 selector : app : taskQueue component : rabbitmq --- apiVersion : v1 kind : ReplicationController metadata : labels : component : rabbitmq name : rabbitmq - controller spec : replicas : 1 template : metadata : labels : app : taskQueue component : rabbitmq spec : containers : - image : rabbitmq name : rabbitmq ports : - containerPort : 5672 resources : limits : cpu : 100 m Make sure the RabbitMQ controller pod is up and running before proceeding further. Expose the RabbitMQ server to local publisher using port-forward . kubectl -n argo-events port-forward 5672:5672 Create the event source by running the following command. 
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/amqp.yaml Inspect the event-source pod logs to make sure it was able to subscribe to the exchange specified in the event source to consume messages. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/amqp.yaml Lets set up a RabbitMQ publisher. If you don't have pika installed, run. python -m pip install pika --upgrade Open a python REPL and run following code to publish a message on exchange called test . import pika connection = pika . BlockingConnection ( pika . ConnectionParameters ( 'localhost' )) channel = connection . channel () channel . basic_publish ( exchange = 'test' , routing_key = 'hello' , body = '{\"message\": \"hello\"}' ) As soon as you publish a message, sensor will trigger an Argo workflow. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"AMQP"},{"location":"eventsources/setup/amqp/#amqp","text":"AMQP event-source listens to messages on the MQ and helps sensor trigger the workloads.","title":"AMQP"},{"location":"eventsources/setup/amqp/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"contentType\" : \"ContentType is the MIME content type\" , \"contentEncoding\" : \"ContentEncoding is the MIME content encoding\" , \"deliveryMode\" : \"Delivery mode can be either - non-persistent (1) or persistent (2)\" , \"priority\" : \"Priority refers to the use - 0 to 9\" , \"correlationId\" : \"CorrelationId is the correlation identifier\" , \"replyTo\" : \"ReplyTo is the address to reply to (ex: RPC)\" , \"expiration\" : \"Expiration refers to message expiration spec\" , \"messageId\" : \"MessageId is message identifier\" , \"timestamp\" : \"Timestamp refers to the message timestamp\" , \"type\" : \"Type refers to the message type name\" , \"appId\" : \"AppId refers to the application id\" , \"exchange\" : \"Exchange is basic.publish exchange\" , \"routingKey\" : \"RoutingKey is basic.publish routing key\" , \"body\" : \"Body represents the message body\" , } }","title":"Event Structure"},{"location":"eventsources/setup/amqp/#setup","text":"Lets set up RabbitMQ locally. apiVersion : v1 kind : Service metadata : labels : component : rabbitmq name : rabbitmq - service spec : ports : - port : 5672 selector : app : taskQueue component : rabbitmq --- apiVersion : v1 kind : ReplicationController metadata : labels : component : rabbitmq name : rabbitmq - controller spec : replicas : 1 template : metadata : labels : app : taskQueue component : rabbitmq spec : containers : - image : rabbitmq name : rabbitmq ports : - containerPort : 5672 resources : limits : cpu : 100 m Make sure the RabbitMQ controller pod is up and running before proceeding further. Expose the RabbitMQ server to local publisher using port-forward . kubectl -n argo-events port-forward 5672:5672 Create the event source by running the following command. 
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/amqp.yaml Inspect the event-source pod logs to make sure it was able to subscribe to the exchange specified in the event source to consume messages. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/amqp.yaml Lets set up a RabbitMQ publisher. If you don't have pika installed, run. python -m pip install pika --upgrade Open a python REPL and run following code to publish a message on exchange called test . import pika connection = pika . BlockingConnection ( pika . ConnectionParameters ( 'localhost' )) channel = connection . channel () channel . basic_publish ( exchange = 'test' , routing_key = 'hello' , body = '{\"message\": \"hello\"}' ) As soon as you publish a message, sensor will trigger an Argo workflow. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/amqp/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/aws-sns/","text":"AWS SNS \u00b6 SNS event-source subscribes to AWS SNS topics, listens events and helps sensor trigger the workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : \"sns headers\" , \"body\" : \"body refers to the sns notification data\" , } } Setup \u00b6 Create a topic called test using aws cli or AWS SNS console. Fetch your access and secret key for AWS account and base64 encode them. Create a secret called aws-secret as follows. apiVersion : v1 kind : Secret metadata : name : aws - secret type : Opaque data : accesskey : < base64 - access - key > secretkey : < base64 - secret - key > Deploy the secret. kubectl -n argo-events apply -f aws-secret.yaml The event-source for AWS SNS creates a pod and exposes it via service. The name for the service is in -eventsource-svc format. You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from AWS. You can find more information on Ingress or Route online. Create the event source by running the following command. Make sure to update the URL in the configuration within the event-source. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/aws-sns.yaml Go to SNS settings on AWS and verify the webhook is registered. You can also check it by inspecting the event-source pod logs. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/aws-sns.yaml Publish a message to the SNS topic, and it will trigger an argo workflow. Run argo list to find the workflow. 
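As a side note on the aws-secret created above, a sketch of the same secret using stringData instead of data would let the Kubernetes API server handle the base64 encoding for you (the values remain placeholders, as in the original):

apiVersion: v1
kind: Secret
metadata:
  name: aws-secret
type: Opaque
stringData:               # plain-text values; Kubernetes base64-encodes them on write
  accesskey: <access-key>
  secretkey: <secret-key>
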
Troubleshoot \u00b6 Please read the FAQ .","title":"AWS SNS"},{"location":"eventsources/setup/aws-sns/#aws-sns","text":"SNS event-source subscribes to AWS SNS topics, listens events and helps sensor trigger the workloads.","title":"AWS SNS"},{"location":"eventsources/setup/aws-sns/#event-structure","text":"The structure of an event dispatched by the event-source over eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : \"sns headers\" , \"body\" : \"body refers to the sns notification data\" , } }","title":"Event Structure"},{"location":"eventsources/setup/aws-sns/#setup","text":"Create a topic called test using aws cli or AWS SNS console. Fetch your access and secret key for AWS account and base64 encode them. Create a secret called aws-secret as follows. apiVersion : v1 kind : Secret metadata : name : aws - secret type : Opaque data : accesskey : < base64 - access - key > secretkey : < base64 - secret - key > Deploy the secret. kubectl -n argo-events apply -f aws-secret.yaml The event-source for AWS SNS creates a pod and exposes it via service. The name for the service is in -eventsource-svc format. You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from AWS. You can find more information on Ingress or Route online. Create the event source by running the following command. Make sure to update the URL in the configuration within the event-source. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/aws-sns.yaml Go to SNS settings on AWS and verify the webhook is registered. You can also check it by inspecting the event-source pod logs. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/aws-sns.yaml Publish a message to the SNS topic, and it will trigger an argo workflow. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/aws-sns/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/aws-sqs/","text":"AWS SQS \u00b6 SQS event-source listens to messages on AWS SQS queue and helps sensor trigger workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"messageId\" : \"message id\" , // Each message attribute consists of a Name, Type, and Value. For more information, // see Amazon SQS Message Attributes // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) // in the Amazon Simple Queue Service Developer Guide. \"messageAttributes\" : \"message attributes\" , \"body\" : \"Body is the message data\" , } } Setup \u00b6 Create a queue called test either using aws cli or AWS SQS management console. 
Fetch the access and secret key for your AWS account and base64 encode them. Create a secret called aws-secret as follows. apiVersion: v1 kind: Secret metadata: name: aws-secret type: Opaque data: accesskey: <base64-access-key> secretkey: <base64-secret-key> Deploy the secret. kubectl -n argo-events apply -f aws-secret.yaml Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/aws-sqs.yaml Inspect the event-source pod logs to make sure it was able to subscribe to the queue specified in the event source to consume messages. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/aws-sqs.yaml Dispatch a message on the SQS queue. aws sqs send-message --queue-url https://sqs.us-east-1.amazonaws.com/XXXXX/test --message-body '{\"message\": \"hello\"}' Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"AWS SQS"},{"location":"eventsources/setup/aws-sqs/#aws-sqs","text":"SQS event-source listens to messages on an AWS SQS queue and helps the sensor trigger workloads.","title":"AWS SQS"},{"location":"eventsources/setup/aws-sqs/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"messageId\" : \"message id\" , // Each message attribute consists of a Name, Type, and Value. For more information, // see Amazon SQS Message Attributes // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) // in the Amazon Simple Queue Service Developer Guide. \"messageAttributes\" : \"message attributes\" , \"body\" : \"Body is the message data\" } }","title":"Event Structure"},{"location":"eventsources/setup/aws-sqs/#setup","text":"Create a queue called test either using the aws cli or the AWS SQS management console. Fetch the access and secret key for your AWS account and base64 encode them. Create a secret called aws-secret as follows. apiVersion: v1 kind: Secret metadata: name: aws-secret type: Opaque data: accesskey: <base64-access-key> secretkey: <base64-secret-key> Deploy the secret. kubectl -n argo-events apply -f aws-secret.yaml Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/aws-sqs.yaml Inspect the event-source pod logs to make sure it was able to subscribe to the queue specified in the event source to consume messages. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/aws-sqs.yaml Dispatch a message on the SQS queue. aws sqs send-message --queue-url https://sqs.us-east-1.amazonaws.com/XXXXX/test --message-body '{\"message\": \"hello\"}' Once a message is published, an argo workflow will be triggered. 
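As an alternative to the aws cli dispatch above, the same test message can be sent with the boto3 package (a sketch; the queue URL mirrors the placeholder used in the CLI example):

import boto3

# Send the test message; replace XXXXX with your AWS account ID.
sqs = boto3.client('sqs', region_name='us-east-1')
sqs.send_message(
    QueueUrl='https://sqs.us-east-1.amazonaws.com/XXXXX/test',
    MessageBody='{\"message\": \"hello\"}',
)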
Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/aws-sqs/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/azure-queue-storage/","text":"Azure Queue Storage \u00b6 Azure Queue Storage event-source allows you to consume messages from Azure Storage queues. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"id\" : \"unique_event_id\" , \"source\" : \"name_of_the_event_source\" , \"specversion\" : \"cloud_events_version\" , \"type\" : \"type_of_event_source\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" , \"time\" : \"event_time\" }, \"data\" : { \"messageID\" : \"MessageID is the ID of the message\" , \"body\" : \"Body represents the message body\" , \"insertionTime\" : \"InsertionTime is the time the message was inserted into the queue\" } } Setup \u00b6 Create a queue called test either using the az cli or the Azure Storage management console. Fetch your connection string for Azure Queue Storage and base64 encode it. Create a secret called azure-secret as follows. apiVersion: v1 kind: Secret metadata: name: azure-secret type: Opaque data: connectionstring: <base64-connection-string> Deploy the secret. kubectl -n argo-events apply -f azure-secret.yaml Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/azure-queue-storage.yaml Inspect the event-source pod logs to make sure it was able to listen to the queue specified in the event source to consume messages. Create a sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/azure-queue-storage.yaml Dispatch a message to the queue. az storage message put -q test --content '{\"message\": \"hello\"}' --account-name mystorageaccount --connection-string \"\" Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"Azure Queue Storage"},{"location":"eventsources/setup/azure-queue-storage/#azure-queue-storage","text":"Azure Queue Storage event-source allows you to consume messages from Azure Storage queues.","title":"Azure Queue Storage"},{"location":"eventsources/setup/azure-queue-storage/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"id\" : \"unique_event_id\" , \"source\" : \"name_of_the_event_source\" , \"specversion\" : \"cloud_events_version\" , \"type\" : \"type_of_event_source\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" , \"time\" : \"event_time\" }, \"data\" : { \"messageID\" : \"MessageID is the ID of the message\" , \"body\" : \"Body represents the message body\" , \"insertionTime\" : \"InsertionTime is the time the message was inserted into the queue\" } }","title":"Event Structure"},{"location":"eventsources/setup/azure-queue-storage/#setup","text":"Create a queue called test either using the az cli or the Azure Storage management console. Fetch your connection string for Azure Queue Storage and base64 encode it. Create a secret called azure-secret as follows. 
apiVersion: v1 kind: Secret metadata: name: azure-secret type: Opaque data: connectionstring: <base64-connection-string> Deploy the secret. kubectl -n argo-events apply -f azure-secret.yaml Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/azure-queue-storage.yaml Inspect the event-source pod logs to make sure it was able to listen to the queue specified in the event source to consume messages. Create a sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/azure-queue-storage.yaml Dispatch a message to the queue. az storage message put -q test --content '{\"message\": \"hello\"}' --account-name mystorageaccount --connection-string \"\" Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/azure-queue-storage/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/azure-service-bus/","text":"Azure Service Bus \u00b6 Service Bus event-source allows you to consume messages from queues and topics in Azure Service Bus and helps the sensor trigger workflows. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"id\" : \"unique_event_id\" , \"source\" : \"name_of_the_event_source\" , \"specversion\" : \"cloud_events_version\" , \"type\" : \"type_of_event_source\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" , \"time\" : \"event_time\" }, \"data\" : { \"applicationProperties\" : \"ApplicationProperties can be used to store custom metadata for a message\" , \"body\" : \"Body represents the message body\" , \"contentType\" : \"ContentType is the MIME content type\" , \"correlationID\" : \"CorrelationID is the correlation identifier\" , \"enqueuedTime\" : \"EnqueuedTime is the time when the message was enqueued\" , \"messageID\" : \"ID of the message\" , \"replyTo\" : \"ReplyTo is an application-defined value that specifies a reply path to the receiver of the message\" , \"sequenceNumber\" : \"SequenceNumber is a unique number assigned to a message by Service Bus\" , \"subject\" : \"Subject enables an application to indicate the purpose of the message, similar to an email subject line\" } } Setup \u00b6 Create a queue called test either using the Azure CLI or the Azure Service Bus management console. Fetch your connection string for Azure Service Bus and base64 encode it. Create a secret called azure-secret as follows. apiVersion: v1 kind: Secret metadata: name: azure-secret type: Opaque data: connectionstring: <base64-connection-string> Deploy the secret. kubectl -n argo-events apply -f azure-secret.yaml Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/azure-service-bus.yaml Inspect the event-source pod logs to make sure it was able to listen to the queue specified in the event source to consume messages. Create a sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/azure-service-bus.yaml Let's set up a Service Bus client. 
If you don't have azure-servicebus installed, run: python -m pip install azure-servicebus --upgrade Open a Python REPL and run the following code to send a message on the queue called test . Before running the code, make sure you have the SERVICE_BUS_CONNECTION_STRING environment variable set. This is the connection string for your Azure Service Bus. import os, json from azure.servicebus import ServiceBusClient, ServiceBusMessage servicebus_client = ServiceBusClient.from_connection_string(conn_str=os.environ['SERVICE_BUS_CONNECTION_STRING']) with servicebus_client: sender = servicebus_client.get_queue_sender(queue_name=\"test\") with sender: message = ServiceBusMessage('{\"hello\": \"world\"}') sender.send_messages(message) As soon as you publish a message, the sensor will trigger an Argo workflow. Run argo list to find the workflow.","title":"Azure Service Bus"},{"location":"eventsources/setup/azure-service-bus/#azure-service-bus","text":"Service Bus event-source allows you to consume messages from queues and topics in Azure Service Bus and helps the sensor trigger workflows.","title":"Azure Service Bus"},{"location":"eventsources/setup/azure-service-bus/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"id\" : \"unique_event_id\" , \"source\" : \"name_of_the_event_source\" , \"specversion\" : \"cloud_events_version\" , \"type\" : \"type_of_event_source\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" , \"time\" : \"event_time\" }, \"data\" : { \"applicationProperties\" : \"ApplicationProperties can be used to store custom metadata for a message\" , \"body\" : \"Body represents the message body\" , \"contentType\" : \"ContentType is the MIME content type\" , \"correlationID\" : \"CorrelationID is the correlation identifier\" , \"enqueuedTime\" : \"EnqueuedTime is the time when the message was enqueued\" , \"messageID\" : \"ID of the message\" , \"replyTo\" : \"ReplyTo is an application-defined value that specifies a reply path to the receiver of the message\" , \"sequenceNumber\" : \"SequenceNumber is a unique number assigned to a message by Service Bus\" , \"subject\" : \"Subject enables an application to indicate the purpose of the message, similar to an email subject line\" } }","title":"Event Structure"},{"location":"eventsources/setup/azure-service-bus/#setup","text":"Create a queue called test either using the Azure CLI or the Azure Service Bus management console. Fetch your connection string for Azure Service Bus and base64 encode it. Create a secret called azure-secret as follows. apiVersion: v1 kind: Secret metadata: name: azure-secret type: Opaque data: connectionstring: <base64-connection-string> Deploy the secret. kubectl -n argo-events apply -f azure-secret.yaml Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/azure-service-bus.yaml Inspect the event-source pod logs to make sure it was able to listen to the queue specified in the event source to consume messages. Create a sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/azure-service-bus.yaml Let's set up a Service Bus client. If you don't have azure-servicebus installed, run: 
python -m pip install azure-servicebus --upgrade Open a python REPL and run the following code to send a message on the queue called test . Before running the code, make sure you have the SERVICE_BUS_CONNECTION_STRING environment variable set. This is the connection string for your Azure Service Bus. import os , json from azure.servicebus import ServiceBusClient , ServiceBusMessage servicebus_client = ServiceBusClient . from_connection_string ( conn_str = os . environ [ 'SERVICE_BUS_CONNECTION_STRING' ]) with servicebus_client : sender = servicebus_client . get_queue_sender ( queue_name = \"test\" ) with sender : message = ServiceBusMessage ( '{\"hello\": \"world\"}' ) sender . send_messages ( message ) As soon as you publish a message, sensor will trigger an Argo workflow. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/bitbucket/","text":"Bitbucket (Cloud) \u00b6 Bitbucket event-source programmatically configures webhooks for projects on Bitbucket and helps sensor trigger the workloads on events. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : \"Body is the Bitbucket event payload\" , \"headers\" : \"Headers from the Bitbucket event\" , } } Specification \u00b6 Bitbucket event-source specification is available here . Example event-source yaml file is here . Setup \u00b6 NOTE: In this setup, we will use the basic auth strategy together with App password (there is also support for OAuth ). Create an App password if you don't have one. Follow instructions to create a new Bitbucket App password. Grant it the Webhooks - Read and Write permissions as well as any permissions that applies to the events that the webhook subscribes to (e.g. if you're using the example event-source yaml file which subscribes to repo:push event then you would also need to grant the Repositories - Read permission). Base64 encode your App password and your Bitbucket username. echo -n | base64 echo -n | base64 Create a secret called bitbucket-access that contains your encoded Bitbucket credentials. apiVersion : v1 kind : Secret metadata : name : bitbucket - access type : Opaque data : username : < base64 - encoded - username - from - previous - step > password : < base64 - encoded - password - from - previous - step > Deploy the secret into K8s cluster. kubectl -n argo-events apply -f bitbucket-access.yaml The event-source for Bitbucket creates a pod and exposes it via service. The name for the service is in -eventsource-svc format. You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from Bitbucket. You can find more information on Ingress or Route online. Create the event source by running the following command. You can use the example event-source yaml file but make sure to replace the url field and to modify owner , repositorySlug and projectKey fields with your own repo. kubectl apply -n argo-events -f Go to Webhooks under your project settings on Bitbucket and verify the webhook is registered. You can also do the same by looking at the event-source pod logs. Create the sensor by running the following command. 
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/sensors/bitbucket.yaml Make a change to one of your project files and commit. It will trigger an argo workflow. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"Bitbucket (Cloud)"},{"location":"eventsources/setup/bitbucket/#bitbucket-cloud","text":"Bitbucket event-source programmatically configures webhooks for projects on Bitbucket and helps sensor trigger the workloads on events.","title":"Bitbucket (Cloud)"},{"location":"eventsources/setup/bitbucket/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : \"Body is the Bitbucket event payload\" , \"headers\" : \"Headers from the Bitbucket event\" , } }","title":"Event Structure"},{"location":"eventsources/setup/bitbucket/#specification","text":"Bitbucket event-source specification is available here . Example event-source yaml file is here .","title":"Specification"},{"location":"eventsources/setup/bitbucket/#setup","text":"NOTE: In this setup, we will use the basic auth strategy together with App password (there is also support for OAuth ). Create an App password if you don't have one. Follow instructions to create a new Bitbucket App password. Grant it the Webhooks - Read and Write permissions as well as any permissions that applies to the events that the webhook subscribes to (e.g. if you're using the example event-source yaml file which subscribes to repo:push event then you would also need to grant the Repositories - Read permission). Base64 encode your App password and your Bitbucket username. echo -n | base64 echo -n | base64 Create a secret called bitbucket-access that contains your encoded Bitbucket credentials. apiVersion : v1 kind : Secret metadata : name : bitbucket - access type : Opaque data : username : < base64 - encoded - username - from - previous - step > password : < base64 - encoded - password - from - previous - step > Deploy the secret into K8s cluster. kubectl -n argo-events apply -f bitbucket-access.yaml The event-source for Bitbucket creates a pod and exposes it via service. The name for the service is in -eventsource-svc format. You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from Bitbucket. You can find more information on Ingress or Route online. Create the event source by running the following command. You can use the example event-source yaml file but make sure to replace the url field and to modify owner , repositorySlug and projectKey fields with your own repo. kubectl apply -n argo-events -f Go to Webhooks under your project settings on Bitbucket and verify the webhook is registered. You can also do the same by looking at the event-source pod logs. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/sensors/bitbucket.yaml Make a change to one of your project files and commit. It will trigger an argo workflow. 
Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/bitbucket/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/bitbucketserver/","text":"Bitbucket Server \u00b6 Bitbucket Server event-source programmatically configures webhooks for projects on Bitbucket Server and helps sensor trigger the workloads on events. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : \"Body is the Bitbucket Server event payload\" , \"headers\" : \"Headers from the Bitbucket Server event\" , } } Specification \u00b6 Bitbucket Server event-source specification is available here . Example event-source yaml file is here . Setup \u00b6 Create an API token if you don't have one. Follow instructions to create a new Bitbucket Server API Token. Grant it the Projects: Admin permissions. Base64 encode your API token key. echo -n | base64 Create a secret called bitbucketserver-access that contains your encoded Bitbucket Server API token. You can also include a secret key that is encoded with base64 for your webhook if any. apiVersion : v1 kind : Secret metadata : name : bitbucketserver - access type : Opaque data : token : < base64 - encoded - api - token - from - previous - step > secret : < base64 - encoded - webhook - secret - key > Deploy the secret into K8s cluster. kubectl -n argo-events apply -f bitbucketserver-access.yaml The event-source for Bitbucket Server creates a pod and exposes it via service. The name for the service is in -eventsource-svc format. You will need to create an Ingress or Openshift Route for the event-source service so that it can be reached from Bitbucket Server. You can find more information on Ingress or Route online. Create the event source by running the following command. You can use the example event-source yaml file from here but make sure to replace the url field and to modify the repositories list with your own repos. kubectl apply -n argo-events -f Go to Webhooks under your project settings on Bitbucket Server and verify the webhook is registered. You can also do the same by looking at the event-source pod logs. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/sensors/bitbucketserver.yaml Make a change to one of your project files and commit. It will trigger an argo workflow. Run argo list to find the workflow. 
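As background on the optional webhook secret above: when a secret is configured, Bitbucket Server signs each delivery with an HMAC of the request body so the receiver can verify it. A generic Python sketch of such a check (illustrative only; this is not Argo Events' internal code, and the header name is an assumption based on Bitbucket's documented X-Hub-Signature convention):

import hashlib
import hmac

def verify_signature(secret: bytes, body: bytes, header_value: str) -> bool:
    # header_value is e.g. 'sha256=<hex digest>' taken from the request header.
    expected = 'sha256=' + hmac.new(secret, body, hashlib.sha256).hexdigest()
    return hmac.compare_digest(expected, header_value)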
Troubleshoot \u00b6 Please read the FAQ .","title":"Bitbucket Server"},{"location":"eventsources/setup/bitbucketserver/#bitbucket-server","text":"Bitbucket Server event-source programmatically configures webhooks for projects on Bitbucket Server and helps sensor trigger the workloads on events.","title":"Bitbucket Server"},{"location":"eventsources/setup/bitbucketserver/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : \"Body is the Bitbucket Server event payload\" , \"headers\" : \"Headers from the Bitbucket Server event\" , } }","title":"Event Structure"},{"location":"eventsources/setup/bitbucketserver/#specification","text":"Bitbucket Server event-source specification is available here . Example event-source yaml file is here .","title":"Specification"},{"location":"eventsources/setup/bitbucketserver/#setup","text":"Create an API token if you don't have one. Follow instructions to create a new Bitbucket Server API Token. Grant it the Projects: Admin permissions. Base64 encode your API token key. echo -n | base64 Create a secret called bitbucketserver-access that contains your encoded Bitbucket Server API token. You can also include a secret key that is encoded with base64 for your webhook if any. apiVersion : v1 kind : Secret metadata : name : bitbucketserver - access type : Opaque data : token : < base64 - encoded - api - token - from - previous - step > secret : < base64 - encoded - webhook - secret - key > Deploy the secret into K8s cluster. kubectl -n argo-events apply -f bitbucketserver-access.yaml The event-source for Bitbucket Server creates a pod and exposes it via service. The name for the service is in -eventsource-svc format. You will need to create an Ingress or Openshift Route for the event-source service so that it can be reached from Bitbucket Server. You can find more information on Ingress or Route online. Create the event source by running the following command. You can use the example event-source yaml file from here but make sure to replace the url field and to modify the repositories list with your own repos. kubectl apply -n argo-events -f Go to Webhooks under your project settings on Bitbucket Server and verify the webhook is registered. You can also do the same by looking at the event-source pod logs. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/sensors/bitbucketserver.yaml Make a change to one of your project files and commit. It will trigger an argo workflow. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/bitbucketserver/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/calendar/","text":"Calendar \u00b6 Calendar event-source generates events on either a cron schedule or an interval and helps sensor trigger workloads. 
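For intuition about how a cron schedule maps to concrete event times, here is a small sketch using the third-party croniter package (an assumption chosen for illustration; Argo Events evaluates the schedule internally and does not use this package):

from datetime import datetime
from croniter import croniter

# A schedule that fires every 10 minutes; print the next three fire times.
it = croniter('*/10 * * * *', datetime(2024, 1, 1))
for _ in range(3):
    print(it.get_next(datetime))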
Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"eventTime\" : { /* UTC time of the event */ }, \"userPayload\" : { /* static payload available in the event source */ } } } Specification \u00b6 Calendar event-source specification is available here . Setup \u00b6 Install the event source in the argo-events namespace. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/calendar.yaml The event-source will generate events every 10 seconds. Let's create the sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/calendar.yaml Once the sensor pod is in a running state, wait for the next interval to occur for the sensor to trigger a workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"Calendar"},{"location":"eventsources/setup/calendar/#calendar","text":"Calendar event-source generates events on either a cron schedule or an interval and helps the sensor trigger workloads.","title":"Calendar"},{"location":"eventsources/setup/calendar/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"eventTime\" : { /* UTC time of the event */ }, \"userPayload\" : { /* static payload available in the event source */ } } }","title":"Event Structure"},{"location":"eventsources/setup/calendar/#specification","text":"Calendar event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/calendar/#setup","text":"Install the event source in the argo-events namespace. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/calendar.yaml The event-source will generate events every 10 seconds. Let's create the sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/calendar.yaml Once the sensor pod is in a running state, wait for the next interval to occur for the sensor to trigger a workflow.","title":"Setup"},{"location":"eventsources/setup/calendar/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/emitter/","text":"Emitter \u00b6 Emitter event-source subscribes to a channel and helps the sensor trigger workloads. 
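A note ahead of the setup below: Emitter speaks the MQTT protocol, so besides the official client libraries you can publish a test message with a generic MQTT client. A hedged sketch with the paho-mqtt package (an assumption; the port matches the broker deployment shown later, and the channel key is a placeholder you must generate from your Emitter license):

import paho.mqtt.client as mqtt

# Emitter topics take the form '<channel key>/<channel name>/'.
client = mqtt.Client()
client.connect('localhost', 8080)
client.publish('REPLACE_WITH_CHANNEL_KEY/test/', '{\"message\": \"hello\"}')
client.disconnect()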
Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"topic\" : \"name_of_the_topic\" , \"body\" : \"message_payload\" } } Specification \u00b6 Emitter event-source specification is available here . Setup \u00b6 Deploy the emitter in your local K8s cluster. --- apiVersion: v1 kind: Service metadata: name: broker labels: app: broker spec: clusterIP: None ports: - port: 4000 targetPort: 4000 selector: app: broker --- apiVersion: apps/v1 kind: Deployment metadata: name: broker spec: replicas: 1 selector: matchLabels: app: broker template: metadata: labels: app: broker spec: containers: - env: - name: EMITTER_LICENSE value: \"zT83oDV0DWY5_JysbSTPTDr8KB0AAAAAAAAAAAAAAAI\" # This is a test license, DO NOT USE IN PRODUCTION! - name: EMITTER_CLUSTER_SEED value: \"broker\" - name: EMITTER_CLUSTER_ADVERTISE value: \"private:4000\" name: broker image: emitter/server:latest ports: - containerPort: 8080 - containerPort: 443 - containerPort: 4000 volumeMounts: - name: broker-volume mountPath: /data volumes: - name: broker-volume hostPath: path: /emitter #directory on host Create the event-source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/emitter.yaml Inspect the event-source pod logs to make sure it was able to subscribe to the topic specified in the event source to consume messages. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/emitter.yaml Send a message on emitter channel using one of the clients https://emitter.io/develop/golang/ . Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"Emitter"},{"location":"eventsources/setup/emitter/#emitter","text":"Emitter event-source subscribes to a channel and helps sensor trigger the workloads.","title":"Emitter"},{"location":"eventsources/setup/emitter/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"topic\" : \"name_of_the_topic\" , \"body\" : \"message_payload\" } }","title":"Event Structure"},{"location":"eventsources/setup/emitter/#specification","text":"Emitter event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/emitter/#setup","text":"Deploy the emitter in your local K8s cluster. 
--- apiVersion: v1 kind: Service metadata: name: broker labels: app: broker spec: clusterIP: None ports: - port: 4000 targetPort: 4000 selector: app: broker --- apiVersion: apps/v1 kind: Deployment metadata: name: broker spec: replicas: 1 selector: matchLabels: app: broker template: metadata: labels: app: broker spec: containers: - env: - name: EMITTER_LICENSE value: \"zT83oDV0DWY5_JysbSTPTDr8KB0AAAAAAAAAAAAAAAI\" # This is a test license, DO NOT USE IN PRODUCTION! - name: EMITTER_CLUSTER_SEED value: \"broker\" - name: EMITTER_CLUSTER_ADVERTISE value: \"private:4000\" name: broker image: emitter/server:latest ports: - containerPort: 8080 - containerPort: 443 - containerPort: 4000 volumeMounts: - name: broker-volume mountPath: /data volumes: - name: broker-volume hostPath: path: /emitter #directory on host Create the event-source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/emitter.yaml Inspect the event-source pod logs to make sure it was able to subscribe to the topic specified in the event source to consume messages. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/emitter.yaml Send a message on the emitter channel using one of the clients listed at https://emitter.io/develop/golang/ . Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/emitter/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/file/","text":"File \u00b6 File event-source listens to file system events and helps the sensor trigger workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"name\" : \"Relative path to the file or directory\" , \"op\" : \"File operation that triggered the event\" // Create, Write, Remove, Rename, Chmod } } Specification \u00b6 File event-source specification is available here . Setup \u00b6 Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/file.yaml The event source has configuration to listen to file system events for the test-data directory and a file called x.txt . Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/file.yaml Log into the event-source pod by running the following command. kubectl -n argo-events exec -it <event-source-pod-name> -c file-events -- /bin/bash Let's create a file called x.txt under the test-data directory in the event-source pod. cd test-data cat <<EOF > x.txt hello EOF Once you create the file x.txt , the sensor will trigger an argo workflow. Run argo list to find the workflow. For real-world use cases, you should use a PersistentVolumeClaim. 
Troubleshoot \u00b6 Please read the FAQ .","title":"File"},{"location":"eventsources/setup/file/#file","text":"File event-source listens to file system events and helps sensor trigger workloads.","title":"File"},{"location":"eventsources/setup/file/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"name\" : \"Relative path to the file or directory\" , \"op\" : \"File operation that triggered the event\" // Create, Write, Remove, Rename, Chmod } }","title":"Event Structure"},{"location":"eventsources/setup/file/#specification","text":"File event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/file/#setup","text":"Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/file.yaml The event source has configuration to listen to file system events for test-data directory and file called x.txt . Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/file.yaml Log into the event-source pod by running following command. kubectl - n argo - events exec - it < event - source - pod - name > - c file - events -- / bin / bash Let's create a file called x.txt under test-data directory in the event-source pod. cd test-data cat < x.txt hello EOF Once you create file x.txt , the sensor will trigger argo workflow. Run argo list to find the workflow. For real-world use cases, you should use PersistentVolumeClaim.","title":"Setup"},{"location":"eventsources/setup/file/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/gcp-pub-sub/","text":"GCP Pub/Sub \u00b6 GCP Pub/Sub event-source subscribes to messages published by GCP publisher and helps sensor trigger workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"id\" : \"message id\" , // Attributes represents the key-value pairs the current message // is labelled with. \"attributes\" : \"key-values\" , \"publishTime\" : \"// The time at which the message was published\" , \"body\" : \"body refers to the message data\" , } } Specification \u00b6 GCP Pub/Sub event-source specification is available here . Setup \u00b6 Fetch the project credentials JSON file from GCP console. If you use Workload Identity, you can skip this and next steps. Create a K8s secret called gcp-credentials to store the credentials file. apiVersion : v1 data : key . json : < YOUR_CREDENTIALS_STRING_FROM_JSON_FILE > kind : Secret metadata : name : gcp - credentials namespace : argo - events type : Opaque Create the event source by running the following command. 
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/gcp-pubsub.yaml If you use Workload Identity, omit credentialSecret field. Instead don't forget to configure appropriate service account (see example ). Inspect the event-source pod logs to make sure it was able to subscribe to the topic. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/gcp-pubsub.yaml Publish a message from GCP Pub/Sub console. Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow. Subscription, topic and service account preparation \u00b6 You can use existing subscriptions/topics, or let Argo Events create them. Here's the table of which fields are required in the configuration file and what permissions are needed for service account. Actions Required configuration fields Necessary permissions for service account Example role Use existing subscription Existing SubscriptionID pubsub.subscriptions.consume for the subscription roles/pubsub.subscriber Use existing subscription and verify topic Existing SubscriptionID and its Topic Above + pubsub.subscriptions.get for the subscription roles/pubsub.subscriber + roles/pubsub.viewer Create subscription for existing topic Existing Topic ( SubscriptionID is optional\u2020) Above + pubsub.subscriptions.create for the project pubsub.topics.attachSubscription for the topic roles/pubsub.subscriber + roles/pubsub.editor Create topic and subscription Non-existing Topic ( SubscriptionID is optional\u2020) Above + pubsub.topic.create for the project roles/pubsub.subscriber + roles/pubsub.editor \u2020 If you omit SubscriptionID , a generated hash value is used. For more details about access control, refer to GCP documents: Access control | Cloud Pub/Sub Documentation | Google Cloud \u29c9 Troubleshoot \u00b6 Please read the FAQ .","title":"GCP Pub/Sub"},{"location":"eventsources/setup/gcp-pub-sub/#gcp-pubsub","text":"GCP Pub/Sub event-source subscribes to messages published by GCP publisher and helps sensor trigger workloads.","title":"GCP Pub/Sub"},{"location":"eventsources/setup/gcp-pub-sub/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"id\" : \"message id\" , // Attributes represents the key-value pairs the current message // is labelled with. \"attributes\" : \"key-values\" , \"publishTime\" : \"// The time at which the message was published\" , \"body\" : \"body refers to the message data\" , } }","title":"Event Structure"},{"location":"eventsources/setup/gcp-pub-sub/#specification","text":"GCP Pub/Sub event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/gcp-pub-sub/#setup","text":"Fetch the project credentials JSON file from GCP console. If you use Workload Identity, you can skip this and next steps. Create a K8s secret called gcp-credentials to store the credentials file. apiVersion : v1 data : key . 
json : < YOUR_CREDENTIALS_STRING_FROM_JSON_FILE > kind : Secret metadata : name : gcp - credentials namespace : argo - events type : Opaque Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/gcp-pubsub.yaml If you use Workload Identity, omit credentialSecret field. Instead don't forget to configure appropriate service account (see example ). Inspect the event-source pod logs to make sure it was able to subscribe to the topic. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/gcp-pubsub.yaml Publish a message from GCP Pub/Sub console. Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/gcp-pub-sub/#subscription-topic-and-service-account-preparation","text":"You can use existing subscriptions/topics, or let Argo Events create them. Here's the table of which fields are required in the configuration file and what permissions are needed for service account. Actions Required configuration fields Necessary permissions for service account Example role Use existing subscription Existing SubscriptionID pubsub.subscriptions.consume for the subscription roles/pubsub.subscriber Use existing subscription and verify topic Existing SubscriptionID and its Topic Above + pubsub.subscriptions.get for the subscription roles/pubsub.subscriber + roles/pubsub.viewer Create subscription for existing topic Existing Topic ( SubscriptionID is optional\u2020) Above + pubsub.subscriptions.create for the project pubsub.topics.attachSubscription for the topic roles/pubsub.subscriber + roles/pubsub.editor Create topic and subscription Non-existing Topic ( SubscriptionID is optional\u2020) Above + pubsub.topic.create for the project roles/pubsub.subscriber + roles/pubsub.editor \u2020 If you omit SubscriptionID , a generated hash value is used. For more details about access control, refer to GCP documents: Access control | Cloud Pub/Sub Documentation | Google Cloud \u29c9","title":"Subscription, topic and service account preparation"},{"location":"eventsources/setup/gcp-pub-sub/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/github/","text":"GitHub \u00b6 GitHub event-source programmatically configures webhooks for projects on GitHub and helps sensor trigger the workloads on events. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : \"Body is the Github event data\" , \"headers\" : \"Headers from the Github event\" , } } Specification \u00b6 GitHub event-source specification is available here . Example event-source yaml file is here . Setup \u00b6 Create an API token if you don't have one. Follow instructions to create a new GitHub API Token. Grant it the repo_hook permissions. Base64 encode your API token key. echo -n | base64 Create a secret called github-access that contains your encoded GitHub API token. 
You can also include a secret key that is encoded with base64 for your webhook if any. apiVersion : v1 kind : Secret metadata : name : github - access type : Opaque data : token : < base64 - encoded - api - token - from - previous - step > secret : < base64 - encoded - webhook - secret - key > Deploy the secret into K8s cluster. kubectl -n argo-events apply -f github-access.yaml The event-source for GitHub creates a pod and exposes it via service. The name for the service is in -eventsource-svc format. You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from GitHub. You can find more information on Ingress or Route online. Create the event source by running the following command. Make sure to replace the url field. kubectl apply -n argo-events -f Go to Webhooks under your project settings on GitHub and verify the webhook is registered. You can also do the same by looking at the event-source pod logs. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/github.yaml Make a change to one of your project files and commit. It will trigger an argo workflow. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"GitHub"},{"location":"eventsources/setup/github/#github","text":"GitHub event-source programmatically configures webhooks for projects on GitHub and helps sensor trigger the workloads on events.","title":"GitHub"},{"location":"eventsources/setup/github/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : \"Body is the Github event data\" , \"headers\" : \"Headers from the Github event\" , } }","title":"Event Structure"},{"location":"eventsources/setup/github/#specification","text":"GitHub event-source specification is available here . Example event-source yaml file is here .","title":"Specification"},{"location":"eventsources/setup/github/#setup","text":"Create an API token if you don't have one. Follow instructions to create a new GitHub API Token. Grant it the repo_hook permissions. Base64 encode your API token key. echo -n | base64 Create a secret called github-access that contains your encoded GitHub API token. You can also include a secret key that is encoded with base64 for your webhook if any. apiVersion : v1 kind : Secret metadata : name : github - access type : Opaque data : token : < base64 - encoded - api - token - from - previous - step > secret : < base64 - encoded - webhook - secret - key > Deploy the secret into K8s cluster. kubectl -n argo-events apply -f github-access.yaml The event-source for GitHub creates a pod and exposes it via service. The name for the service is in -eventsource-svc format. You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from GitHub. You can find more information on Ingress or Route online. Create the event source by running the following command. Make sure to replace the url field. 
kubectl apply -n argo-events -f Go to Webhooks under your project settings on GitHub and verify the webhook is registered. You can also do the same by looking at the event-source pod logs. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/github.yaml Make a change to one of your project files and commit. It will trigger an argo workflow. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/github/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/gitlab/","text":"GitLab \u00b6 GitLab event-source programmatically configures webhooks for projects on GitLab and helps sensor trigger the workloads upon events. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : \"Body is the GitLab event data\" , \"headers\" : \"Headers from the GitLab event\" , } } Specification \u00b6 GitLab event-source specification is available here . Example event-source yaml file is here . Setup \u00b6 Create an API token if you don't have one. Follow instructions to create a new GitLab API Token. Grant it the api permissions. Base64 encode your api token key. echo -n | base64 Create a secret called gitlab-access . apiVersion : v1 kind : Secret metadata : name : gitlab - access type : Opaque data : token : < base64 - encoded - api - token - from - previous - step > Deploy the secret into K8s cluster. kubectl -n argo-events apply -f gitlab-access.yaml The event-source for GitLab creates a pod and exposes it via service. The name for the service is in -eventsource-svc format. You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from GitLab. You can find more information on Ingress or Route online. Create the event source by running the following command. Make sure to update url field. kubectl apply -n argo-events -f Go to Webhooks under your project settings on GitLab and verify the webhook is registered. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/gitlab.yaml Make a change to one of your project files and commit. It will trigger an argo workflow. Run argo list to find the workflow. 
Troubleshoot \u00b6 Please read the FAQ .","title":"GitLab"},{"location":"eventsources/setup/gitlab/#gitlab","text":"GitLab event-source programmatically configures webhooks for projects on GitLab and helps sensor trigger the workloads upon events.","title":"GitLab"},{"location":"eventsources/setup/gitlab/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : \"Body is the GitLab event data\" , \"headers\" : \"Headers from the GitLab event\" , } }","title":"Event Structure"},{"location":"eventsources/setup/gitlab/#specification","text":"GitLab event-source specification is available here . Example event-source yaml file is here .","title":"Specification"},{"location":"eventsources/setup/gitlab/#setup","text":"Create an API token if you don't have one. Follow instructions to create a new GitLab API Token. Grant it the api permissions. Base64 encode your api token key. echo -n | base64 Create a secret called gitlab-access . apiVersion : v1 kind : Secret metadata : name : gitlab - access type : Opaque data : token : < base64 - encoded - api - token - from - previous - step > Deploy the secret into K8s cluster. kubectl -n argo-events apply -f gitlab-access.yaml The event-source for GitLab creates a pod and exposes it via service. The name for the service is in -eventsource-svc format. You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from GitLab. You can find more information on Ingress or Route online. Create the event source by running the following command. Make sure to update url field. kubectl apply -n argo-events -f Go to Webhooks under your project settings on GitLab and verify the webhook is registered. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/gitlab.yaml Make a change to one of your project files and commit. It will trigger an argo workflow. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/gitlab/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/kafka/","text":"Kafka \u00b6 Kafka event-source listens to messages on topics and helps the sensor trigger workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"topic\" : \"kafka_topic\" , \"partition\" : \"partition_number\" , \"body\" : \"message_body\" , \"timestamp\" : \"timestamp_of_the_message\" } } Specification \u00b6 Kafka event-source specification is available here . Setup \u00b6 Make sure to set up the Kafka cluster in Kubernetes if you don't already have one. You can refer to https://github.com/Yolean/kubernetes-kafka for installation instructions. 
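A later step asks you to send a test message with a Kafka client; one option (an assumption, not part of the official steps) is the kafka-python package, assuming a broker is reachable at localhost:9092 and the topic matches your event-source configuration:

from kafka import KafkaProducer

# Publish a JSON test message; 'test' is a placeholder topic name.
producer = KafkaProducer(bootstrap_servers='localhost:9092')
producer.send('test', b'{\"message\": \"hello\"}')
producer.flush()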
Create the event source by running the following command. Make sure to update the appropriate fields. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/kafka.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/kafka.yaml Send a message using a Kafka client. More info on how to send messages is available at https://kafka.apache.org/quickstart . Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"Kafka"},{"location":"eventsources/setup/kafka/#kafka","text":"Kafka event-source listens to messages on topics and helps the sensor trigger workloads.","title":"Kafka"},{"location":"eventsources/setup/kafka/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"topic\" : \"kafka_topic\" , \"partition\" : \"partition_number\" , \"body\" : \"message_body\" , \"timestamp\" : \"timestamp_of_the_message\" } }","title":"Event Structure"},{"location":"eventsources/setup/kafka/#specification","text":"Kafka event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/kafka/#setup","text":"Make sure to set up the Kafka cluster in Kubernetes if you don't already have one. You can refer to https://github.com/Yolean/kubernetes-kafka for installation instructions. Create the event source by running the following command. Make sure to update the appropriate fields. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/kafka.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/kafka.yaml Send a message using a Kafka client. More info on how to send messages is available at https://kafka.apache.org/quickstart . Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/kafka/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/minio/","text":"Minio \u00b6 Minio event-source listens to minio bucket notifications and helps the sensor trigger workloads. Note : Minio event-source is exclusively for the Minio server. If you want to trigger workloads on AWS S3 bucket notifications, please set up the AWS SNS event-source. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { notification : [ { /* Minio notification . 
More info is available at https://docs.min.io/docs/minio-bucket-notification-guide.html */ } ] } } Setup \u00b6 Make sure to have the minio server deployed and reachable from the event-source. If you are running Minio locally, make sure to port-forward to the minio pod in order to make the service available outside the local K8s cluster. kubectl -n argo-events port-forward <minio-pod-name> 9000:9000 Configure the minio client mc . mc config host add minio http://localhost:9000 minio minio123 Create a K8s secret that holds the access and secret key. This secret will be referenced in the minio event source definition that we are going to install in a later step. apiVersion : v1 data : # base64 of minio accesskey : bWluaW8 = # base64 of minio123 secretkey : bWluaW8xMjM = kind : Secret metadata : name : artifacts - minio namespace : argo - events The event source we are going to use configures notifications for a bucket called input . mc mb minio/input Let's install the event source in the argo-events namespace. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/minio.yaml Let's create the sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/minio.yaml Create a file named hello-world.txt and upload it to the input bucket. This will trigger the argo workflow. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"Minio"},{"location":"eventsources/setup/minio/#minio","text":"Minio event-source listens to minio bucket notifications and helps the sensor trigger workloads. Note : Minio event-source is exclusively for the Minio server. If you want to trigger workloads on AWS S3 bucket notifications, please set up the AWS SNS event-source.","title":"Minio"},{"location":"eventsources/setup/minio/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { notification : [ { /* Minio notification . More info is available at https://docs.min.io/docs/minio-bucket-notification-guide.html */ } ] } }","title":"Event Structure"},{"location":"eventsources/setup/minio/#setup","text":"Make sure to have the minio server deployed and reachable from the event-source. If you are running Minio locally, make sure to port-forward to the minio pod in order to make the service available outside the local K8s cluster. kubectl -n argo-events port-forward <minio-pod-name> 9000:9000 Configure the minio client mc . mc config host add minio http://localhost:9000 minio minio123 Create a K8s secret that holds the access and secret key. This secret will be referenced in the minio event source definition that we are going to install in a later step. apiVersion : v1 data : # base64 of minio accesskey : bWluaW8 = # base64 of minio123 secretkey : bWluaW8xMjM = kind : Secret metadata : name : artifacts - minio namespace : argo - events The event source we are going to use configures notifications for a bucket called input . mc mb minio/input Let's install the event source in the argo-events namespace. 
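Before running the install command that follows, here is a minimal sketch of what the Minio EventSource ties together: the artifacts-minio secret and the input bucket created above. The endpoint minio-service.argo-events:9000 and the event type are assumptions based on a typical local install; the stock example manifest is the authoritative version.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: minio
spec:
  minio:
    example:
      bucket:
        name: input                             # bucket created with `mc mb minio/input`
      endpoint: minio-service.argo-events:9000  # assumed in-cluster Minio endpoint
      insecure: true                            # plain HTTP for the local test setup
      events:
        - s3:ObjectCreated:Put                  # notify on object uploads
      accessKey:
        name: artifacts-minio                   # secret created earlier
        key: accesskey
      secretKey:
        name: artifacts-minio
        key: secretkey
```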
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/minio.yaml Let's create the sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/minio.yaml Create a file named hello-world.txt and upload it to the input bucket. This will trigger the argo workflow. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/minio/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/mqtt/","text":"MQTT \u00b6 The event-source listens to messages over MQTT and helps the sensor trigger workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"topic\" : \"Topic refers to the MQTT topic name\" , \"messageId\" : \"MessageId is the unique ID for the message\" , \"body\" : \"Body is the message payload\" } } Specification \u00b6 MQTT event-source specification is available here . Setup \u00b6 Make sure to set up the MQTT Broker and Bridge in Kubernetes if you don't already have one. Create the event source by running the following command. Make sure to update the appropriate fields. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/mqtt.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/mqtt-sensor.yaml Send a message using an MQTT client. Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"MQTT"},{"location":"eventsources/setup/mqtt/#mqtt","text":"The event-source listens to messages over MQTT and helps the sensor trigger workloads.","title":"MQTT"},{"location":"eventsources/setup/mqtt/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"topic\" : \"Topic refers to the MQTT topic name\" , \"messageId\" : \"MessageId is the unique ID for the message\" , \"body\" : \"Body is the message payload\" } }","title":"Event Structure"},{"location":"eventsources/setup/mqtt/#specification","text":"MQTT event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/mqtt/#setup","text":"Make sure to set up the MQTT Broker and Bridge in Kubernetes if you don't already have one. Create the event source by running the following command. Make sure to update the appropriate fields. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/mqtt.yaml Create the sensor by running the following command. 
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/mqtt-sensor.yaml Send a message using an MQTT client. Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/mqtt/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/nats/","text":"NATS \u00b6 NATS event-source listens to NATS subject notifications and helps the sensor trigger workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"subject\" : \"name_of_the_nats_subject\" , \"headers\" : \"headers_of_the_nats_message\" , \"body\" : \"message_payload\" } } Specification \u00b6 NATS event-source specification is available here . Setup \u00b6 Make sure to have a NATS cluster deployed in Kubernetes. If you don't have one already installed, please refer to https://github.com/nats-io/nats-operator for details. A NATS cluster setup for test purposes: apiVersion : v1 kind : Service metadata : name : nats namespace : argo - events labels : component : nats spec : selector : component : nats type : ClusterIP ports : - name : client port : 4222 - name : cluster port : 6222 - name : monitor port : 8222 --- apiVersion : apps / v1beta1 kind : StatefulSet metadata : name : nats namespace : argo - events labels : component : nats spec : serviceName : nats replicas : 1 template : metadata : labels : component : nats spec : containers : - name : nats image : nats : latest ports : - containerPort : 4222 name : client - containerPort : 6222 name : cluster - containerPort : 8222 name : monitor livenessProbe : httpGet : path : / port : 8222 initialDelaySeconds : 10 timeoutSeconds : 5 Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/nats.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/nats.yaml If you are running NATS on a local K8s cluster, make sure to port-forward to the pod: kubectl -n argo-events port-forward <nats-pod-name> 4222:4222 Publish a message for the subject specified in the event source. Refer to the NATS example at https://github.com/nats-io/go-nats-examples/tree/master/patterns/publish-subscribe to publish a message to the subject. go run main.go -s localhost foo '{\"message\": \"hello\"}' Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow. 
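To tie the NATS test cluster above to Argo Events, a minimal EventSource sketch; the subject foo matches the publish example, while the URL (pointing at the client port of the nats service defined above) is an assumption to adjust for your cluster.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: nats
spec:
  nats:
    example:
      url: nats://nats.argo-events:4222  # assumed address of the nats service's client port
      subject: foo                       # subject published to in the example above
      jsonBody: true                     # treat the message payload as JSON
```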
Troubleshoot \u00b6 Please read the FAQ .","title":"NATS"},{"location":"eventsources/setup/nats/#nats","text":"NATS event-source listens to NATS subject notifications and helps sensor trigger the workloads.","title":"NATS"},{"location":"eventsources/setup/nats/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"subject\" : \"name_of_the_nats_subject\" , \"headers\" : \"headers_of_the_nats_message\" , \"body\" : \"message_payload\" } }","title":"Event Structure"},{"location":"eventsources/setup/nats/#specification","text":"NATS event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/nats/#setup","text":"Make sure to have NATS cluster deployed in the Kubernetes. If you don't have one already installed, please refer https://github.com/nats-io/nats-operator for details. NATS cluster setup for test purposes, apiVersion : v1 kind : Service metadata : name : nats namespace : argo - events labels : component : nats spec : selector : component : nats type : ClusterIP ports : - name : client port : 4222 - name : cluster port : 6222 - name : monitor port : 8222 --- apiVersion : apps / v1beta1 kind : StatefulSet metadata : name : nats namespace : argo - events labels : component : nats spec : serviceName : nats replicas : 1 template : metadata : labels : component : nats spec : containers : - name : nats image : nats : latest ports : - containerPort : 4222 name : client - containerPort : 6222 name : cluster - containerPort : 8222 name : monitor livenessProbe : httpGet : path : / port : 8222 initialDelaySeconds : 10 timeoutSeconds : 5 Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/nats.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/nats.yaml If you are running NATS on local K8s cluster, make sure to port-forward to pod, kubectl -n argo-events port-forward 4222:4222 Publish a message for the subject specified in the event source. Refer the nats example to publish a message to the subject https://github.com/nats-io/go-nats-examples/tree/master/patterns/publish-subscribe . go run main . go - s localhost foo '{\"message\": \"hello\"}' Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/nats/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/nsq/","text":"NSQ \u00b6 NSQ event-source subscribes to nsq pub/sub notifications and helps sensor trigger the workloads. 
Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : \"Body is the message data\" , \"timestamp\" : \"timestamp of the message\" , \"nsqdAddress\" : \"NSQDAddress is the address of the nsq host\" } } Specification \u00b6 NSQ event-source is available here . Setup \u00b6 Deploy NSQ on local K8s cluster. apiVersion : v1 kind : Service metadata : name : nsqlookupd labels : app : nsq spec : ports : - port : 4160 targetPort : 4160 name : tcp - port : 4161 targetPort : 4161 name : http clusterIP : None selector : app : nsq component : nsqlookupd --- apiVersion : v1 kind : Service metadata : name : nsqd labels : app : nsq spec : ports : - port : 4150 targetPort : 4150 name : tcp - port : 4151 targetPort : 4151 name : http clusterIP : None selector : app : nsq component : nsqd --- apiVersion : v1 kind : Service metadata : name : nsqadmin labels : app : nsq spec : ports : - port : 4170 targetPort : 4170 name : tcp - port : 4171 targetPort : 4171 name : http selector : app : nsq component : nsqadmin --- apiVersion : apps / v1beta1 kind : StatefulSet metadata : name : nsqlookupd spec : serviceName : \"nsqlookupd\" replicas : 1 updateStrategy : type : RollingUpdate template : metadata : labels : app : nsq component : nsqlookupd spec : containers : - name : nsqlookupd image : nsqio / nsq : v1 . 1.0 imagePullPolicy : Always resources : requests : cpu : 30 m memory : 64 Mi ports : - containerPort : 4160 name : tcp - containerPort : 4161 name : http livenessProbe : httpGet : path : / ping port : http initialDelaySeconds : 5 readinessProbe : httpGet : path : / ping port : http initialDelaySeconds : 2 command : - / nsqlookupd terminationGracePeriodSeconds : 5 --- apiVersion : apps / v1beta1 kind : Deployment metadata : name : nsqd spec : replicas : 1 selector : matchLabels : app : nsq component : nsqd template : metadata : labels : app : nsq component : nsqd spec : containers : - name : nsqd image : nsqio / nsq : v1 . 1.0 imagePullPolicy : Always resources : requests : cpu : 30 m memory : 64 Mi ports : - containerPort : 4150 name : tcp - containerPort : 4151 name : http livenessProbe : httpGet : path : / ping port : http initialDelaySeconds : 5 readinessProbe : httpGet : path : / ping port : http initialDelaySeconds : 2 volumeMounts : - name : datadir mountPath : / data command : - / nsqd - - data - path - / data - - lookupd - tcp - address - nsqlookupd . argo - events . svc : 4160 - - broadcast - address - nsqd . argo - events . svc env : - name : HOSTNAME valueFrom : fieldRef : fieldPath : metadata . name terminationGracePeriodSeconds : 5 volumes : - name : datadir emptyDir : {} --- apiVersion : extensions / v1beta1 kind : Deployment metadata : name : nsqadmin spec : replicas : 1 template : metadata : labels : app : nsq component : nsqadmin spec : containers : - name : nsqadmin image : nsqio / nsq : v1 . 
1.0 imagePullPolicy : Always resources : requests : cpu : 30 m memory : 64 Mi ports : - containerPort : 4170 name : tcp - containerPort : 4171 name : http livenessProbe : httpGet : path : / ping port : http initialDelaySeconds : 10 readinessProbe : httpGet : path : / ping port : http initialDelaySeconds : 5 command : - / nsqadmin - - lookupd - http - address - nsqlookupd . argo - events . svc : 4161 terminationGracePeriodSeconds : 5 Expose NSQD by kubectl port-forward . kubectl -n argo-events port-forward service/nsqd 4151:4151 Create topic hello and channel my-channel . curl -X POST 'http://localhost:4151/topic/create?topic=hello' curl -X POST 'http://localhost:4151/channel/create?topic=hello&channel=my-channel' Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/nsq.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/nsq.yaml Publish a message on topic hello and channel my-channel . curl -d '{\"message\": \"hello\"}' 'http://localhost:4151/pub?topic=hello&channel=my-channel' Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"NSQ"},{"location":"eventsources/setup/nsq/#nsq","text":"NSQ event-source subscribes to nsq pub/sub notifications and helps sensor trigger the workloads.","title":"NSQ"},{"location":"eventsources/setup/nsq/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : \"Body is the message data\" , \"timestamp\" : \"timestamp of the message\" , \"nsqdAddress\" : \"NSQDAddress is the address of the nsq host\" } }","title":"Event Structure"},{"location":"eventsources/setup/nsq/#specification","text":"NSQ event-source is available here .","title":"Specification"},{"location":"eventsources/setup/nsq/#setup","text":"Deploy NSQ on local K8s cluster. apiVersion : v1 kind : Service metadata : name : nsqlookupd labels : app : nsq spec : ports : - port : 4160 targetPort : 4160 name : tcp - port : 4161 targetPort : 4161 name : http clusterIP : None selector : app : nsq component : nsqlookupd --- apiVersion : v1 kind : Service metadata : name : nsqd labels : app : nsq spec : ports : - port : 4150 targetPort : 4150 name : tcp - port : 4151 targetPort : 4151 name : http clusterIP : None selector : app : nsq component : nsqd --- apiVersion : v1 kind : Service metadata : name : nsqadmin labels : app : nsq spec : ports : - port : 4170 targetPort : 4170 name : tcp - port : 4171 targetPort : 4171 name : http selector : app : nsq component : nsqadmin --- apiVersion : apps / v1beta1 kind : StatefulSet metadata : name : nsqlookupd spec : serviceName : \"nsqlookupd\" replicas : 1 updateStrategy : type : RollingUpdate template : metadata : labels : app : nsq component : nsqlookupd spec : containers : - name : nsqlookupd image : nsqio / nsq : v1 . 
1.0 imagePullPolicy : Always resources : requests : cpu : 30 m memory : 64 Mi ports : - containerPort : 4160 name : tcp - containerPort : 4161 name : http livenessProbe : httpGet : path : / ping port : http initialDelaySeconds : 5 readinessProbe : httpGet : path : / ping port : http initialDelaySeconds : 2 command : - / nsqlookupd terminationGracePeriodSeconds : 5 --- apiVersion : apps / v1beta1 kind : Deployment metadata : name : nsqd spec : replicas : 1 selector : matchLabels : app : nsq component : nsqd template : metadata : labels : app : nsq component : nsqd spec : containers : - name : nsqd image : nsqio / nsq : v1 . 1.0 imagePullPolicy : Always resources : requests : cpu : 30 m memory : 64 Mi ports : - containerPort : 4150 name : tcp - containerPort : 4151 name : http livenessProbe : httpGet : path : / ping port : http initialDelaySeconds : 5 readinessProbe : httpGet : path : / ping port : http initialDelaySeconds : 2 volumeMounts : - name : datadir mountPath : / data command : - / nsqd - - data - path - / data - - lookupd - tcp - address - nsqlookupd . argo - events . svc : 4160 - - broadcast - address - nsqd . argo - events . svc env : - name : HOSTNAME valueFrom : fieldRef : fieldPath : metadata . name terminationGracePeriodSeconds : 5 volumes : - name : datadir emptyDir : {} --- apiVersion : extensions / v1beta1 kind : Deployment metadata : name : nsqadmin spec : replicas : 1 template : metadata : labels : app : nsq component : nsqadmin spec : containers : - name : nsqadmin image : nsqio / nsq : v1 . 1.0 imagePullPolicy : Always resources : requests : cpu : 30 m memory : 64 Mi ports : - containerPort : 4170 name : tcp - containerPort : 4171 name : http livenessProbe : httpGet : path : / ping port : http initialDelaySeconds : 10 readinessProbe : httpGet : path : / ping port : http initialDelaySeconds : 5 command : - / nsqadmin - - lookupd - http - address - nsqlookupd . argo - events . svc : 4161 terminationGracePeriodSeconds : 5 Expose NSQD by kubectl port-forward . kubectl -n argo-events port-forward service/nsqd 4151:4151 Create topic hello and channel my-channel . curl -X POST 'http://localhost:4151/topic/create?topic=hello' curl -X POST 'http://localhost:4151/channel/create?topic=hello&channel=my-channel' Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/nsq.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/nsq.yaml Publish a message on topic hello and channel my-channel . curl -d '{\"message\": \"hello\"}' 'http://localhost:4151/pub?topic=hello&channel=my-channel' Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/nsq/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/pulsar/","text":"Pulsar \u00b6 Pulsar event-source subscribes to the topics, listens events and helps sensor trigger the workflows. 
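Returning briefly to the NSQ setup above: a minimal EventSource sketch matching the hello topic and my-channel channel created there. The hostAddress value pointing at the nsqlookupd HTTP port is an assumption based on the services defined in that setup; check the referenced specification for the exact field set.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: nsq
spec:
  nsq:
    example:
      hostAddress: nsqlookupd.argo-events.svc:4161  # assumed nsqlookupd HTTP address
      topic: hello                                  # topic created via the curl commands
      channel: my-channel                           # channel created via the curl commands
      jsonBody: true                                # treat the message body as JSON
```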
Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : \"body is the message data\" , \"publishTime\" : \"timestamp of the message\" , \"key\" : \"message key\" } } Specification \u00b6 Pulsar event-source is available here . Setup \u00b6 To test locally, deploy a standalone Pulsar. apiVersion : apps / v1 kind : Deployment metadata : name : pulsar labels : app : pulsar spec : replicas : 1 template : metadata : name : pulsar labels : app : pulsar spec : containers : - name : pulsar image : apachepulsar / pulsar : 2.4 . 1 command : - bin / pulsar - standalone imagePullPolicy : IfNotPresent volumeMounts : - mountPath : /pulsar/ data name : datadir restartPolicy : Always volumes : - name : datadir emptyDir : {} selector : matchLabels : app : pulsar --- apiVersion : v1 kind : Service metadata : name : pulsar spec : selector : app : pulsar ports : - port : 8080 targetPort : 8080 name : http - port : 6650 name : another targetPort : 6650 type : LoadBalancer Port forward to the pulsar pod using kubectl for port 6650. For production deployment, follow the official Pulsar documentation online. Deploy the eventsource. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/pulsar.yaml Deploy the sensor. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/pulsar.yaml Publish a message on topic test . Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"Pulsar"},{"location":"eventsources/setup/pulsar/#pulsar","text":"Pulsar event-source subscribes to the topics, listens events and helps sensor trigger the workflows.","title":"Pulsar"},{"location":"eventsources/setup/pulsar/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : \"body is the message data\" , \"publishTime\" : \"timestamp of the message\" , \"key\" : \"message key\" } }","title":"Event Structure"},{"location":"eventsources/setup/pulsar/#specification","text":"Pulsar event-source is available here .","title":"Specification"},{"location":"eventsources/setup/pulsar/#setup","text":"To test locally, deploy a standalone Pulsar. apiVersion : apps / v1 kind : Deployment metadata : name : pulsar labels : app : pulsar spec : replicas : 1 template : metadata : name : pulsar labels : app : pulsar spec : containers : - name : pulsar image : apachepulsar / pulsar : 2.4 . 
1 command : - bin / pulsar - standalone imagePullPolicy : IfNotPresent volumeMounts : - mountPath : /pulsar/ data name : datadir restartPolicy : Always volumes : - name : datadir emptyDir : {} selector : matchLabels : app : pulsar --- apiVersion : v1 kind : Service metadata : name : pulsar spec : selector : app : pulsar ports : - port : 8080 targetPort : 8080 name : http - port : 6650 name : another targetPort : 6650 type : LoadBalancer Port forward to the pulsar pod using kubectl for port 6650. For production deployment, follow the official Pulsar documentation online. Deploy the eventsource. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/pulsar.yaml Deploy the sensor. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/pulsar.yaml Publish a message on the topic test . Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/pulsar/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/redis-streams/","text":"Redis Streams \u00b6 Redis stream event-source listens to messages on Redis streams and helps the sensor trigger workloads. Messages from the stream are read using a Redis consumer group. The main reason for using a consumer group is to resume from the last read upon pod restarts. A common consumer group (defaults to \"argo-events-cg\") is created (if it does not already exist) on all specified streams. When using a consumer group, each read through a consumer group is a write operation, because Redis needs to update the last retrieved message id and the pending entries list (PEL) of that specific user in the consumer group. So it can only work with the master Redis instance and not replicas ( https://redis.io/topics/streams-intro ). Redis stream event source expects all the streams to be present on the Redis server. This event source only starts pulling messages from the streams when all of the specified streams exist on the Redis server. On the initial setup, the consumer group is created on all the specified streams to start reading from the latest message (not necessarily the beginning of the stream). On subsequent setups (the consumer group already exists on the streams) or during pod restarts, messages are pulled from the last unacknowledged message in the stream. The consumer group is never deleted automatically. If you want a completely fresh setup again, you must delete the consumer group from the streams. 
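Returning to the Pulsar setup above: a minimal EventSource sketch wired to the standalone deployment. The broker URL (the pulsar service on port 6650) is an assumption, and test is the topic published to in the setup steps.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: pulsar
spec:
  pulsar:
    example:
      url: pulsar://pulsar.argo-events:6650  # assumed in-cluster broker URL (port 6650 above)
      topics:
        - test                               # topic published to in the setup steps
      jsonBody: true                         # treat the message body as JSON
```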
Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"id\" : \"unique_event_id\" , \"source\" : \"name_of_the_event_source\" , \"specversion\" : \"cloud_events_version\" , \"type\" : \"type_of_event_source\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" , \"time\" : \"event_time\" }, \"data\" : { \"stream\" : \"Name of the Redis stream\" , \"message_id\" : \"Message Id\" , \"values\" : \"message body\" } } Example: { \"context\" : { \"id\" : \"64313638396337352d623565612d343639302d383262362d306630333562333437363637\" , \"source\" : \"redis-stream\" , \"specversion\" : \"1.0\" , \"type\" : \"redisStream\" , \"datacontenttype\" : \"application/json\" , \"subject\" : \"example\" , \"time\" : \"2022-03-17T04:47:42Z\" }, \"data\" : { \"stream\" : \"FOO\" , \"message_id\" : \"1647495121754-0\" , \"values\" : { \"key-1\" : \"val-1\" , \"key-2\" : \"val-2\" } } } Specification \u00b6 Redis stream event-source specification is available here . Setup \u00b6 Follow the documentation to set up Redis database. Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/redis-streams.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/redis-streams.yaml Log into redis pod using kubectl . kubectl - n argo - events exec - it < redis - pod - name > - c < redis - container - name > -- / bin / bash Run redis-cli and publish a message on the stream FOO . XADD FOO * message hello Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow. Troubleshoot \u00b6 Redis stream event source expects all the streams to be present on redis server. It only starts pulling messages from the streams when all of the specified streams exist on the redis server. Please read the FAQ .","title":"Redis Streams"},{"location":"eventsources/setup/redis-streams/#redis-streams","text":"Redis stream event-source listens to messages on Redis streams and helps sensor trigger workloads. Messages from the stream are read using the Redis consumer group. The main reason for using consumer group is to resume from the last read upon pod restarts. A common consumer group (defaults to \"argo-events-cg\") is created (if not already exists) on all specified streams. When using consumer group, each read through a consumer group is a write operation, because Redis needs to update the last retrieved message id and the pending entries list(PEL) of that specific user in the consumer group. So it can only work with the master Redis instance and not replicas ( https://redis.io/topics/streams-intro ). Redis stream event source expects all the streams to be present on the Redis server. This event source only starts pulling messages from the streams when all of the specified streams exist on the Redis server. On the initial setup, the consumer group is created on all the specified streams to start reading from the latest message (not necessarily the beginning of the stream). On subsequent setups (the consumer group already exists on the streams) or during pod restarts, messages are pulled from the last unacknowledged message in the stream. The consumer group is never deleted automatically. 
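A minimal Redis stream EventSource sketch for the setup discussed here; the hostAddress is an assumption for a typical in-cluster Redis, FOO matches the stream used in the XADD example, and consumerGroup simply makes the documented "argo-events-cg" default explicit.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: redis-stream
spec:
  redisStream:
    example:
      hostAddress: redis.argo-events:6379  # assumed master Redis address (replicas won't work)
      streams:
        - FOO                              # stream published to with XADD
      consumerGroup: argo-events-cg        # default consumer group name, shown explicitly
```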
If you want a completely fresh setup again, you must delete the consumer group from the streams.","title":"Redis Streams"},{"location":"eventsources/setup/redis-streams/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"id\" : \"unique_event_id\" , \"source\" : \"name_of_the_event_source\" , \"specversion\" : \"cloud_events_version\" , \"type\" : \"type_of_event_source\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" , \"time\" : \"event_time\" }, \"data\" : { \"stream\" : \"Name of the Redis stream\" , \"message_id\" : \"Message Id\" , \"values\" : \"message body\" } } Example: { \"context\" : { \"id\" : \"64313638396337352d623565612d343639302d383262362d306630333562333437363637\" , \"source\" : \"redis-stream\" , \"specversion\" : \"1.0\" , \"type\" : \"redisStream\" , \"datacontenttype\" : \"application/json\" , \"subject\" : \"example\" , \"time\" : \"2022-03-17T04:47:42Z\" }, \"data\" : { \"stream\" : \"FOO\" , \"message_id\" : \"1647495121754-0\" , \"values\" : { \"key-1\" : \"val-1\" , \"key-2\" : \"val-2\" } } }","title":"Event Structure"},{"location":"eventsources/setup/redis-streams/#specification","text":"Redis stream event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/redis-streams/#setup","text":"Follow the documentation to set up the Redis database. Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/redis-streams.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/redis-streams.yaml Log into the redis pod using kubectl . kubectl -n argo-events exec -it <redis-pod-name> -c <redis-container-name> -- /bin/bash Run redis-cli and publish a message on the stream FOO . XADD FOO * message hello Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/redis-streams/#troubleshoot","text":"Redis stream event source expects all the streams to be present on the redis server. It only starts pulling messages from the streams when all of the specified streams exist on the redis server. Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/redis/","text":"Redis \u00b6 Redis event-source subscribes to a Redis publisher and helps the sensor trigger workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"channel\" : \"Subscription channel\" , \"pattern\" : \"Message pattern\" , \"body\" : \"message body\" // string } } Specification \u00b6 Redis event-source specification is available here . Setup \u00b6 Follow the documentation to set up the Redis database. Create the event source by running the following command. 
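Before running the command that follows, a minimal sketch of what a Redis (pub/sub) EventSource contains: it subscribes to the FOO channel published to later with PUBLISH. The hostAddress is an assumption for a typical in-cluster Redis.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: redis
spec:
  redis:
    example:
      hostAddress: redis.argo-events:6379  # assumed Redis service address
      channels:
        - FOO                              # channel published to with `PUBLISH FOO hello`
```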
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/redis.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/redis.yaml Log into redis pod using kubectl . kubectl - n argo - events exec - it < redis - pod - name > - c < redis - container - name > -- / bin / bash Run redis-cli and publish a message on FOO channel. PUBLISH FOO hello Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"Redis"},{"location":"eventsources/setup/redis/#redis","text":"Redis event-source subscribes to Redis publisher and helps sensor trigger workloads.","title":"Redis"},{"location":"eventsources/setup/redis/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"channel\" : \"Subscription channel\" , \"pattern\" : \"Message pattern\" , \"body\" : \"message body\" // string } }","title":"Event Structure"},{"location":"eventsources/setup/redis/#specification","text":"Redis event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/redis/#setup","text":"Follow the documentation to set up Redis database. Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/redis.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/redis.yaml Log into redis pod using kubectl . kubectl - n argo - events exec - it < redis - pod - name > - c < redis - container - name > -- / bin / bash Run redis-cli and publish a message on FOO channel. PUBLISH FOO hello Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/redis/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/resource/","text":"Resource \u00b6 Resource event-source watches change notifications for K8s object and helps sensor trigger the workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"type\" : \"type_of_the_event\" , // ADD, UPDATE or DELETE \"body\" : \"resource_body\" , // JSON format \"group\" : \"resource_group_name\" , \"version\" : \"resource_version_name\" , \"resource\" : \"resource_name\" } } Specification \u00b6 Resource event-source specification is available here . Setup \u00b6 Create the event source by running the following command. 
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/resource.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/resource.yaml The event source we created in step 1 contains configuration that makes the event-source listen to Argo Workflows marked with the label app: my-workflow . Let's create a workflow called my-workflow with the label app: my-workflow . apiVersion : argoproj . io / v1alpha1 kind : Workflow metadata : name : my - workflow labels : app : my - workflow spec : entrypoint : whalesay templates : - name : whalesay container : image : docker / whalesay : latest command : [ cowsay ] args : [ \"hello world\" ] Once my-workflow is created, the sensor will trigger the workflow. Run argo list to list the triggered workflow. List Options \u00b6 The Resource Event-Source allows you to configure the list options through labels and field selectors for setting up a watch on objects. In the example above, we set up the list options as follows: filter : # labels and filters are meant to provide K8s API options to filter the object list that are being watched . # Please read https : // kubernetes . io / docs / concepts / overview / working - with - objects / labels / # api for more details . # labels provide listing options to K8s API to watch objects labels : - key : app # Supported operations like == , != , etc . # Defaults to == . # Refer https : // kubernetes . io / docs / concepts / overview / working - with - objects / labels / # label - selectors for more info . # optional . operation : \"==\" value : my - workflow The key-operation-value items under filter -> labels are used by the event-source to filter the objects that are eligible for the watch. So, in the present case, the event-source will set up a watch for those objects that have the label \"app: my-workflow\". You can add more key-operation-value items to the list as per your use-case. Similarly, you can pass field selectors to the watch list options, e.g., filter : # labels and filters are meant to provide K8s API options to filter the object list that are being watched . # Please read https : // kubernetes . io / docs / concepts / overview / working - with - objects / labels / # api for more details . # fields provide listing options to K8s API to watch objects fields : - key : metadata . name # Supported operations like == , != , <= , >= etc . # Defaults to == . # Refer https : // kubernetes . io / docs / concepts / overview / working - with - objects / field - selectors / for more info . # optional . operation : == value : my - workflow Note: The labels and fields under filter are used at the time of setting up the watch by the event-source. If you want to filter the objects based on annotations or some other fields, use the Data Filters available in the sensor. 
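Putting the list options together, a minimal Resource EventSource sketch that watches Argo Workflows labeled app: my-workflow, as discussed above. The group/version/resource triple identifies Workflows, and the eventTypes list is an assumption showing the common case.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: resource
spec:
  resource:
    example:
      namespace: argo-events
      group: argoproj.io      # API group of the watched resource
      version: v1alpha1
      resource: workflows     # plural resource name
      eventTypes:
        - ADD                 # assumed; UPDATE and DELETE are also supported
      filter:
        labels:
          - key: app
            operation: "=="
            value: my-workflow
```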
Troubleshoot \u00b6 Please read the FAQ .","title":"Resource"},{"location":"eventsources/setup/resource/#resource","text":"Resource event-source watches change notifications for K8s object and helps sensor trigger the workloads.","title":"Resource"},{"location":"eventsources/setup/resource/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"type\" : \"type_of_the_event\" , // ADD, UPDATE or DELETE \"body\" : \"resource_body\" , // JSON format \"group\" : \"resource_group_name\" , \"version\" : \"resource_version_name\" , \"resource\" : \"resource_name\" } }","title":"Event Structure"},{"location":"eventsources/setup/resource/#specification","text":"Resource event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/resource/#setup","text":"Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/resource.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/resource.yaml The event source we created in step 1 contains configuration which makes the event-source listen to Argo workflows marked with label app: my-workflow . Lets create a workflow called my-workflow with label app: my-workflow . apiVersion : argoproj . io / v1alpha1 kind : Workflow metadata : name : my - workflow labels : app : my - workflow spec : entrypoint : whalesay templates : - name : whalesay container : image : docker / whalesay : latest command : [ cowsay ] args : [ \"hello world\" ] Once the my-workflow is created, the sensor will trigger the workflow. Run argo list to list the triggered workflow.","title":"Setup"},{"location":"eventsources/setup/resource/#list-options","text":"The Resource Event-Source allows to configure the list options through labels and field selectors for setting up a watch on objects. In the example above, we had set up the list option as follows, filter : # labels and filters are meant to provide K8s API options to filter the object list that are being watched . # Please read https : // kubernetes . io / docs / concepts / overview / working - with - objects / labels / # api for more details . # labels provide listing options to K8s API to watch objects labels : - key : app # Supported operations like == , != , etc . # Defaults to == . # Refer https : // kubernetes . io / docs / concepts / overview / working - with - objects / labels / # label - selectors for more info . # optional . operation : \"==\" value : my - workflow The key-operation-value items under the filter -> labels are used by the event-source to filter the objects that are eligible for the watch. So, in the present case, the event-source will set up a watch for those objects who have label \"app: my-workflow\". You can add more key-operation-value items to the list as per your use-case. 
Similarly, you can pass field selectors to the watch list options, e.g., filter : # labels and filters are meant to provide K8s API options to filter the object list that are being watched . # Please read https : // kubernetes . io / docs / concepts / overview / working - with - objects / labels / # api for more details . # fields provide listing options to K8s API to watch objects fields : - key : metadata . name # Supported operations like == , != , <= , >= etc . # Defaults to == . # Refer https : // kubernetes . io / docs / concepts / overview / working - with - objects / field - selectors / for more info . # optional . operation : == value : my - workflow Note: The label and fields under filter are used at the time of setting up the watch by the event-source. If you want to filter the objects based on the annotations or some other fields, use the Data Filters available in the sensor.","title":"List Options"},{"location":"eventsources/setup/resource/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/sftp/","text":"SFTP \u00b6 SFTP event-source polls an SFTP server to identify changes and helps sensor trigger workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"name\" : \"Relative path to the file or directory\" , \"op\" : \"File operation that triggered the event\" // Create, Remove } } Specification \u00b6 SFTP event-source specification is available here . Setup \u00b6 Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/sftp.yaml The event source has configuration to poll the sftp server every 10 seconds for test-data directory and file(s) called x.txt . Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/sftp.yaml Log into the event-source pod by running following command. kubectl - n argo - events exec - it < event - source - pod - name > - c sftp - events -- / bin / bash Create a file called x.txt under test-data directory on the SFTP server. Once you create file x.txt , the sensor will trigger argo workflow. Run argo list to find the workflow. For real-world use cases, you should use PersistentVolumeClaim. 
Troubleshoot \u00b6 Please read the FAQ .","title":"SFTP"},{"location":"eventsources/setup/sftp/#sftp","text":"SFTP event-source polls an SFTP server to identify changes and helps the sensor trigger workloads.","title":"SFTP"},{"location":"eventsources/setup/sftp/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"name\" : \"Relative path to the file or directory\" , \"op\" : \"File operation that triggered the event\" // Create, Remove } }","title":"Event Structure"},{"location":"eventsources/setup/sftp/#specification","text":"SFTP event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/sftp/#setup","text":"Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/sftp.yaml The event source has configuration to poll the sftp server every 10 seconds for the test-data directory and file(s) called x.txt . Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/sftp.yaml Log into the event-source pod by running the following command. kubectl -n argo-events exec -it <event-source-pod-name> -c sftp-events -- /bin/bash Create a file called x.txt under the test-data directory on the SFTP server. Once you create the file x.txt , the sensor will trigger an argo workflow. Run argo list to find the workflow. For real-world use cases, you should use a PersistentVolumeClaim.","title":"Setup"},{"location":"eventsources/setup/sftp/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/webhook/","text":"Webhook \u00b6 Webhook event-source exposes an HTTP server and allows external entities to trigger workloads via HTTP requests. Event Structure \u00b6 The structure of an event dispatched by the event-source to the sensor looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : { /* the headers from the request received by the event - source from the external entity */ }, \"body\" : { /* the payload of the request received by the event - source from the external entity */ } } } Specification \u00b6 Webhook event-source specification is available here . Setup \u00b6 Install the event source in the argo-events namespace. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml The event-source pod is listening for HTTP requests on port 12000 and endpoint /example . It's time to create the sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/webhook.yaml Once the sensor pod is in the running state, test the setup by sending a POST request to the event-source service. 
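The webhook event-source described above boils down to a small spec. This sketch mirrors the port 12000 / endpoint /example configuration just mentioned, with a service stanza exposing the HTTP server in-cluster; it is a minimal illustration, and the stock example manifest remains the reference.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: webhook
spec:
  service:
    ports:
      - port: 12000
        targetPort: 12000
  webhook:
    example:
      port: "12000"       # port the HTTP server listens on
      endpoint: /example  # path that accepts the POST request
      method: POST
```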
Troubleshoot \u00b6 Please read the FAQ .","title":"Webhook"},{"location":"eventsources/setup/webhook/#webhook","text":"Webhook event-source exposes a http server and allows external entities to trigger workloads via http requests.","title":"Webhook"},{"location":"eventsources/setup/webhook/#event-structure","text":"The structure of an event dispatched by the event-source to the sensor looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : { /* the headers from the request received by the event - source from the external entity */ }, \"body\" : { /* the payload of the request received by the event - source from the external entity */ }, } }","title":"Event Structure"},{"location":"eventsources/setup/webhook/#specification","text":"Webhook event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/webhook/#setup","text":"Install the event source in the argo-events namespace. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml The event-source pod is listening for HTTP requests on port 12000 and endpoint /example . It's time to create the sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/webhook.yaml Once the sensor pod is in running state, test the setup by sending a POST request to event-source service.","title":"Setup"},{"location":"eventsources/setup/webhook/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"sensors/ha/","text":"Sensor High Availability \u00b6 Sensor controller creates a k8s deployment (replica number defaults to 1) for each Sensor object. HA with Active-Passive strategy can be achieved by setting spec.replicas to a number greater than 1, which means only one Pod serves traffic and the rest ones stand by. One of standby Pods will be automatically elected to be active if the old one is gone. Please DO NOT manually scale up the replicas, that might cause unexpected behaviors! Kubernetes Leader Election \u00b6 By default, Argo Events will use NATS for the HA leader election except when using a Kafka Eventbus, in which case a leader election is not required as a Sensor that uses a Kafka EventBus is capable of horizontally scaling. If using a different EventBus you can opt-in to a Kubernetes native leader election by specifying the following annotation. annotations : events.argoproj.io/leader-election : k8s To use Kubernetes leader election the following RBAC rules need to be associated with the Sensor ServiceAccount. apiVersion : rbac.authorization.k8s.io/v1 kind : Role metadata : name : argo-events-leaderelection-role rules : - apiGroups : [ \"coordination.k8s.io\" ] resources : [ \"leases\" ] verbs : [ \"get\" , \"create\" , \"update\" ] More \u00b6 Click here to learn more information about Argo Events DR/HA recommendations.","title":"Sensor High Availability"},{"location":"sensors/ha/#sensor-high-availability","text":"Sensor controller creates a k8s deployment (replica number defaults to 1) for each Sensor object. 
HA with an Active-Passive strategy can be achieved by setting spec.replicas to a number greater than 1, which means only one Pod serves traffic and the rest stand by. One of the standby Pods will be automatically elected to become active if the old one is gone. Please DO NOT manually scale up the replicas, as that might cause unexpected behaviors!","title":"Sensor High Availability"},{"location":"sensors/ha/#kubernetes-leader-election","text":"By default, Argo Events will use NATS for the HA leader election except when using a Kafka Eventbus, in which case a leader election is not required as a Sensor that uses a Kafka EventBus is capable of horizontally scaling. If using a different EventBus you can opt in to a Kubernetes native leader election by specifying the following annotation. annotations : events.argoproj.io/leader-election : k8s To use Kubernetes leader election the following RBAC rules need to be associated with the Sensor ServiceAccount. apiVersion : rbac.authorization.k8s.io/v1 kind : Role metadata : name : argo-events-leaderelection-role rules : - apiGroups : [ \"coordination.k8s.io\" ] resources : [ \"leases\" ] verbs : [ \"get\" , \"create\" , \"update\" ]","title":"Kubernetes Leader Election"},{"location":"sensors/ha/#more","text":"Click here for more information about Argo Events DR/HA recommendations.","title":"More"},{"location":"sensors/more-about-sensors-and-triggers/","text":"More About Sensors And Triggers \u00b6 Multiple Dependencies \u00b6 If there are multiple dependencies defined in the Sensor , you can configure Trigger Conditions to determine what kind of situation could get the trigger executed. For example, if 2 dependencies A and B are defined, then the condition A || B means an event from either A or B will execute the trigger. What happens if A && B is defined? Assume that before B has an event b1 delivered, A has already got events a1 - a10 ; in this case, a10 and b1 will be used to execute the trigger, and a1 - a9 will be dropped. In short, at the moment Trigger Conditions resolve to true, the latest events from each dependency will be used to trigger the actions. Duplicate Dependencies \u00b6 Due to technical reasons when using the NATS Streaming bus, the same eventSourceName and eventName combo cannot be referenced twice in one Sensor object. For example, the following dependency definitions are not allowed. However, it can be referenced unlimited times in different Sensor objects, so if you do have similar requirements, use 2 Sensor objects instead. spec : dependencies : - name : dep01 eventSourceName : webhook eventName : example filters : data : - path : body.value type : number comparator : \"<\" value : - \"20.0\" - name : dep02 eventSourceName : webhook eventName : example filters : data : - path : body.value type : number comparator : \">\" value : - \"50.0\" Note that this is not an issue for the Jetstream bus, however. Events Delivery Order \u00b6 The following statements are based on using NATS Streaming as the EventBus. In general, the order of events delivered to a Sensor is the order they were published, but there's no guarantee of that. There could be cases where the Sensor fails to acknowledge the first message, and then succeeds in acknowledging the second one before the first one is redelivered. Events Delivery Guarantee \u00b6 NATS Streaming offers an at-least-once delivery guarantee. Jetstream has additional features that get closer to \"exactly once\". 
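Returning to the duplicate-dependencies restriction above: to illustrate the "use 2 Sensor objects" workaround, the disallowed dep01 / dep02 pair can simply be split, one dependency per Sensor. Sensor names are illustrative, and triggers are omitted for brevity:

apiVersion: argoproj.io/v1alpha1
kind: Sensor
metadata:
  name: webhook-below-20
spec:
  dependencies:
    - name: dep01
      eventSourceName: webhook
      eventName: example
      filters:
        data:
          - path: body.value
            type: number
            comparator: "<"
            value:
              - "20.0"
  # triggers: ...
---
apiVersion: argoproj.io/v1alpha1
kind: Sensor
metadata:
  name: webhook-above-50
spec:
  dependencies:
    - name: dep02
      eventSourceName: webhook
      eventName: example
      filters:
        data:
          - path: body.value
            type: number
            comparator: ">"
            value:
              - "50.0"
  # triggers: ...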
In addition, in the Sensor application, an in-memory cache is implemented to cache the event IDs delivered in the last 5 minutes: this is used to make sure there won't be any duplicate events delivered. Based on this, we are able to achieve 1) \"exactly once\" in almost all cases, with the exception of pods dying while processing messages, and 2) \"at least once\" in all cases. Trigger Retries \u00b6 By default, there's no retry for the trigger execution; this is based on the fact that the Sensor has no idea whether retrying a failure would bring any unexpected results. If you prefer to have retries for the trigger , add a retryStrategy to the spec. spec : triggers : - template : name : http-trigger http : url : https://xxxxx.com/ method : GET retryStrategy : # Give up after this many times steps : 3 Or if you want more control over the retries: spec : triggers : - retryStrategy : # Give up after this many times steps : 3 # The initial duration, use strings like \"2s\", \"1m\" duration : 2s # Duration is multiplied by factor each retry, if factor is not zero # and steps limit has not been reached. # Should not be negative # # Defaults to \"1.0\" factor : 2.0 # The sleep between each retry is the duration plus an additional # amount chosen uniformly at random from the interval between # zero and `jitter * duration`. # # Defaults to \"1\" jitter : 2 Trigger Rate Limit \u00b6 There's no rate limit for a trigger unless you configure the spec as follows: spec : triggers : - rateLimit : # Second, Minute or Hour, defaults to Second unit : Second # Requests per unit requestsPerUnit : 20 Revision History Limit \u00b6 Optionally, a revisionHistoryLimit may be configured in the spec as follows: spec : # Optional revisionHistoryLimit : 3 Dead Letter Queue Trigger \u00b6 To help avoid data loss and dropping a message on failure after all the retries are exhausted, optionally, a dlqTrigger may be configured as follows to invoke any of the 10+ triggers : spec : triggers : - template : name : http-trigger http : url : https://xxxxx.com/ method : GET # must be true for dlqTrigger atLeastOnce : true retryStrategy : steps : 3 dlqTrigger : template : name : dlq-http-trigger http : url : https://xxxxx.com/ method : PUT # must be true for dlqTrigger atLeastOnce : true # retries the dlqTrigger 5 times retryStrategy : steps : 5 If the trigger fails, it will retry up to the configured number of retries based on the retryStrategy . If the maximum retries are reached and the trigger still fails, the dlqTrigger will be invoked if specified. In order to use the dlqTrigger , atLeastOnce must be set to true within both the trigger and the dlqTrigger for the Sensor to know about the failure and invoke the dlqTrigger . note: dlqTrigger is only available for the top-level trigger and not recursively within the dlqTrigger template.","title":"More Information"},{"location":"sensors/more-about-sensors-and-triggers/#more-about-sensors-and-triggers","text":"","title":"More About Sensors And Triggers"},{"location":"sensors/more-about-sensors-and-triggers/#multiple-dependencies","text":"If there are multiple dependencies defined in the Sensor , you can configure Trigger Conditions to determine what kind of situation could get the trigger executed. For example, if 2 dependencies A and B are defined, then the condition A || B means an event from either A or B will execute the trigger. What happens if A && B is defined? 
Assume that before B has an event b1 delivered, A has already got events a1 - a10 ; in this case, a10 and b1 will be used to execute the trigger, and a1 - a9 will be dropped. In short, at the moment Trigger Conditions resolve to true, the latest events from each dependency will be used to trigger the actions.","title":"Multiple Dependencies"},{"location":"sensors/more-about-sensors-and-triggers/#duplicate-dependencies","text":"Due to technical reasons when using the NATS Streaming bus, the same eventSourceName and eventName combo cannot be referenced twice in one Sensor object. For example, the following dependency definitions are not allowed. However, it can be referenced unlimited times in different Sensor objects, so if you do have similar requirements, use 2 Sensor objects instead. spec : dependencies : - name : dep01 eventSourceName : webhook eventName : example filters : data : - path : body.value type : number comparator : \"<\" value : - \"20.0\" - name : dep02 eventSourceName : webhook eventName : example filters : data : - path : body.value type : number comparator : \">\" value : - \"50.0\" Note that this is not an issue for the Jetstream bus, however.","title":"Duplicate Dependencies"},{"location":"sensors/more-about-sensors-and-triggers/#events-delivery-order","text":"The following statements are based on using NATS Streaming as the EventBus. In general, the order of events delivered to a Sensor is the order they were published, but there's no guarantee of that. There could be cases where the Sensor fails to acknowledge the first message, and then succeeds in acknowledging the second one before the first one is redelivered.","title":"Events Delivery Order"},{"location":"sensors/more-about-sensors-and-triggers/#events-delivery-guarantee","text":"NATS Streaming offers an at-least-once delivery guarantee. Jetstream has additional features that get closer to \"exactly once\". In addition, in the Sensor application, an in-memory cache is implemented to cache the event IDs delivered in the last 5 minutes: this is used to make sure there won't be any duplicate events delivered. Based on this, we are able to achieve 1) \"exactly once\" in almost all cases, with the exception of pods dying while processing messages, and 2) \"at least once\" in all cases.","title":"Events Delivery Guarantee"},{"location":"sensors/more-about-sensors-and-triggers/#trigger-retries","text":"By default, there's no retry for the trigger execution; this is based on the fact that the Sensor has no idea whether retrying a failure would bring any unexpected results. If you prefer to have retries for the trigger , add a retryStrategy to the spec. spec : triggers : - template : name : http-trigger http : url : https://xxxxx.com/ method : GET retryStrategy : # Give up after this many times steps : 3 Or if you want more control over the retries: spec : triggers : - retryStrategy : # Give up after this many times steps : 3 # The initial duration, use strings like \"2s\", \"1m\" duration : 2s # Duration is multiplied by factor each retry, if factor is not zero # and steps limit has not been reached. # Should not be negative # # Defaults to \"1.0\" factor : 2.0 # The sleep between each retry is the duration plus an additional # amount chosen uniformly at random from the interval between # zero and `jitter * duration`. 
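# As an illustrative worked example of this backoff arithmetic (using the
# values in this very spec): with steps: 3, duration: 2s, factor: 2.0 and
# jitter: 2, the waits before the three retries are roughly 2s, 4s and 8s
# (each wait is the previous one multiplied by factor), and each wait gets
# an additional random amount of up to jitter times the current duration.
# After the third failed retry the Sensor gives up, or invokes the
# dlqTrigger if one is configured.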
# # Defaults to \"1\" jitter : 2","title":"Trigger Retries"},{"location":"sensors/more-about-sensors-and-triggers/#trigger-rate-limit","text":"There's no rate limit for a trigger unless you configure the spec as following: spec : triggers : - rateLimit : # Second, Minute or Hour, defaults to Second unit : Second # Requests per unit requestsPerUnit : 20","title":"Trigger Rate Limit"},{"location":"sensors/more-about-sensors-and-triggers/#revision-history-limit","text":"Optionally, a revisionHistoryLimit may be configured in the spec as following: spec : # Optional revisionHistoryLimit : 3","title":"Revision History Limit"},{"location":"sensors/more-about-sensors-and-triggers/#dead-letter-queue-trigger","text":"To help avoid data loss and dropping a message on failure after all the retries are exhausted, optionally, a dlqTrigger may be configured as following to invoke any of the 10+ triggers : spec : triggers : - template : name : http-trigger http : url : https://xxxxx.com/ method : GET # must be true for dlqTrigger atLeastOnce : true retryStrategy : steps : 3 dlqTrigger : template : name : dlq-http-trigger http : url : https://xxxxx.com/ method : PUT # must be true for dlqTrigger atLeastOnce : true # retries the dlqTrigger 5 times retryStrategy : steps : 5 If the trigger fails, it will retry up to the configured number of retries based on retryStrategy . If the maximum retries are reached and the trigger, the dlqTrigger will be invoked if specified. In order to use the dlqTrigger , the atLeastOnce must be set to true within the trigger and the dlqTrigger for the Sensor to know about the failure and invoke the dlqTrigger . note: dlqTrigger is only available for the top level trigger and not *recursively within the dlqTrigger template.","title":"Dead Letter Queue Trigger"},{"location":"sensors/transform/","text":"Event Transformation \u00b6 Available after v1.6.0 Lua Script: Executes user-defined Lua script to transform the event. JQ Command: Evaluates JQ command to transform the event. We use https://github.com/itchyny/gojq to evaluate JQ commands. Note \u00b6 If set, transformations are applied to the event before the filters are applied. Either a Lua script or a JQ command can be used for the transformation, not both. Only event data is available for the transformation and not the context. The event is discarded if the transformation fails. Lua Script \u00b6 apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : webhook spec : template : serviceAccountName : operate-workflow-sa dependencies : - name : test-dep eventSourceName : webhook eventName : example transform : script : |- event.body.message='updated' return event triggers : - template : name : webhook-workflow-trigger conditions : \"test-dep\" k8s : operation : create source : resource : apiVersion : argoproj.io/v1alpha1 kind : Workflow metadata : generateName : webhook- spec : entrypoint : whalesay arguments : parameters : - name : message # the value will get overridden by event payload from test-dep value : hello world templates : - name : whalesay inputs : parameters : - name : message container : image : docker/whalesay:latest command : [ cowsay ] args : [ \"{{inputs.parameters.message}}\" ] parameters : - src : dependencyName : test-dep dataKey : body dest : spec.arguments.parameters.0.value transform.script field defines the Lua script that gets executed when an event is received. The event data is available to Lua execution context via a global variable called event . 
The above script sets the value of body.message field within the event data to a new value called updated and returns the event. The type of the event variable is Table and the script must return a Table representing a valid JSON object. JQ Command \u00b6 apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : webhook spec : template : serviceAccountName : operate-workflow-sa dependencies : - name : test-dep eventSourceName : webhook eventName : example transform : jq : \".body.message *= 2\" triggers : - template : name : webhook-workflow-trigger-1 conditions : \"test-dep-foo\" k8s : operation : create source : resource : apiVersion : argoproj.io/v1alpha1 kind : Workflow metadata : generateName : webhook- spec : entrypoint : whalesay arguments : parameters : - name : message # the value will get overridden by event payload from test-dep value : hello world templates : - name : whalesay inputs : parameters : - name : message container : image : docker/whalesay:latest command : [ cowsay ] args : [ \"{{inputs.parameters.message}}\" ] parameters : - src : dependencyName : test-dep dataKey : body dest : spec.arguments.parameters.0.value The above script applies a JQ command .body.message *= 2 on the event data which appends the value of .body.message to itself and return the event. The output of the transformation must be a valid JSON object.","title":"Event Transformation"},{"location":"sensors/transform/#event-transformation","text":"Available after v1.6.0 Lua Script: Executes user-defined Lua script to transform the event. JQ Command: Evaluates JQ command to transform the event. We use https://github.com/itchyny/gojq to evaluate JQ commands.","title":"Event Transformation"},{"location":"sensors/transform/#note","text":"If set, transformations are applied to the event before the filters are applied. Either a Lua script or a JQ command can be used for the transformation, not both. Only event data is available for the transformation and not the context. The event is discarded if the transformation fails.","title":"Note"},{"location":"sensors/transform/#lua-script","text":"apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : webhook spec : template : serviceAccountName : operate-workflow-sa dependencies : - name : test-dep eventSourceName : webhook eventName : example transform : script : |- event.body.message='updated' return event triggers : - template : name : webhook-workflow-trigger conditions : \"test-dep\" k8s : operation : create source : resource : apiVersion : argoproj.io/v1alpha1 kind : Workflow metadata : generateName : webhook- spec : entrypoint : whalesay arguments : parameters : - name : message # the value will get overridden by event payload from test-dep value : hello world templates : - name : whalesay inputs : parameters : - name : message container : image : docker/whalesay:latest command : [ cowsay ] args : [ \"{{inputs.parameters.message}}\" ] parameters : - src : dependencyName : test-dep dataKey : body dest : spec.arguments.parameters.0.value transform.script field defines the Lua script that gets executed when an event is received. The event data is available to Lua execution context via a global variable called event . The above script sets the value of body.message field within the event data to a new value called updated and returns the event. 
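Similarly, to make the JQ transform concrete: in jq/gojq, multiplying a string by a number repeats the string, so assuming a body of {"message": "hello"}, the command .body.message *= 2 yields {"message": "hellohello"}; if message were a number instead, it would simply be doubled.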
The type of the event variable is Table and the script must return a Table representing a valid JSON object.","title":"Lua Script"},{"location":"sensors/transform/#jq-command","text":"apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : webhook spec : template : serviceAccountName : operate-workflow-sa dependencies : - name : test-dep eventSourceName : webhook eventName : example transform : jq : \".body.message *= 2\" triggers : - template : name : webhook-workflow-trigger-1 conditions : \"test-dep-foo\" k8s : operation : create source : resource : apiVersion : argoproj.io/v1alpha1 kind : Workflow metadata : generateName : webhook- spec : entrypoint : whalesay arguments : parameters : - name : message # the value will get overridden by event payload from test-dep value : hello world templates : - name : whalesay inputs : parameters : - name : message container : image : docker/whalesay:latest command : [ cowsay ] args : [ \"{{inputs.parameters.message}}\" ] parameters : - src : dependencyName : test-dep dataKey : body dest : spec.arguments.parameters.0.value The above manifest applies the JQ command .body.message *= 2 to the event data, which appends the value of .body.message to itself and returns the event. The output of the transformation must be a valid JSON object.","title":"JQ Command"},{"location":"sensors/trigger-conditions/","text":"Trigger Conditions \u00b6 v1.0 and after Triggers can be executed based on different dependency conditions . An example with conditions : apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : example spec : dependencies : - name : dep01 eventSourceName : webhook-a eventName : example01 - name : dep02 eventSourceName : webhook-a eventName : example02 - name : dep03 eventSourceName : webhook-b eventName : example03 triggers : - template : conditions : \"dep02\" name : trigger01 http : url : http://abc.com/hello1 method : GET - template : conditions : \"dep02 && dep03\" name : trigger02 http : url : http://abc.com/hello2 method : GET - template : conditions : \"(dep01 || dep02) && dep03\" name : trigger03 http : url : http://abc.com/hello3 method : GET Conditions is a boolean expression containing dependency names; the trigger won't be executed until the expression resolves to true. The operators in conditions include: && || Triggers Without Conditions \u00b6 If conditions is missing, the default condition to execute the trigger is the && logic of all the defined dependencies. Conditions Reset \u00b6 When multiple dependencies are defined for a trigger, the trigger won't be executed until the condition expression is resolved to true . Sometimes you might want to reset the state of all the dependencies participating in the conditions; conditions reset is the way to do it. For example, your trigger has a condition of A && B , and both A and B are expected to have an event every day. If one day, for some reason, A gets an event but B doesn't, it ends up with today's A and tomorrow's B triggering an action, which might not be something you want. 
To avoid that, you can reset the conditions as follows: spec : triggers : - template : conditions : \"dep01 && dep02\" conditionsReset : - byTime : # Reset conditions at 23:59 cron : \"59 23 * * *\" # Optional, defaults to UTC # More info for timezone: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones timezone : America/Los_Angeles name : trigger01","title":"Trigger Conditions"},{"location":"sensors/trigger-conditions/#trigger-conditions","text":"v1.0 and after Triggers can be executed based on different dependency conditions . An example with conditions : apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : example spec : dependencies : - name : dep01 eventSourceName : webhook-a eventName : example01 - name : dep02 eventSourceName : webhook-a eventName : example02 - name : dep03 eventSourceName : webhook-b eventName : example03 triggers : - template : conditions : \"dep02\" name : trigger01 http : url : http://abc.com/hello1 method : GET - template : conditions : \"dep02 && dep03\" name : trigger02 http : url : http://abc.com/hello2 method : GET - template : conditions : \"(dep01 || dep02) && dep03\" name : trigger03 http : url : http://abc.com/hello3 method : GET Conditions is a boolean expression containing dependency names; the trigger won't be executed until the expression resolves to true. The operators in conditions include: && ||","title":"Trigger Conditions"},{"location":"sensors/trigger-conditions/#triggers-without-conditions","text":"If conditions is missing, the default condition to execute the trigger is the && logic of all the defined dependencies.","title":"Triggers Without Conditions"},{"location":"sensors/trigger-conditions/#conditions-reset","text":"When multiple dependencies are defined for a trigger, the trigger won't be executed until the condition expression is resolved to true . Sometimes you might want to reset the state of all the dependencies participating in the conditions; conditions reset is the way to do it. For example, your trigger has a condition of A && B , and both A and B are expected to have an event every day. If one day, for some reason, A gets an event but B doesn't, it ends up with today's A and tomorrow's B triggering an action, which might not be something you want. To avoid that, you can reset the conditions as follows: spec : triggers : - template : conditions : \"dep01 && dep02\" conditionsReset : - byTime : # Reset conditions at 23:59 cron : \"59 23 * * *\" # Optional, defaults to UTC # More info for timezone: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones timezone : America/Los_Angeles name : trigger01","title":"Conditions Reset"},{"location":"sensors/filters/ctx/","text":"Context Filter \u00b6 Context filter is applied to the event context. A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } } Fields \u00b6 The context filter has the following fields: filters : context : type : event_type subject : event_subject source : event_source datacontenttype : event_data_content_type You can also specify id, specversion and time fields in the YAML manifest, but they are ignored in filtering. 
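As an illustration of context filtering, a filter of source: custom-webhook (as used in the How it works example that follows) would accept an event whose context looks like the one below and reject any event whose source differs; all values are illustrative:

{
  "context": {
    "type": "webhook",
    "specversion": "1.0",
    "source": "custom-webhook",
    "id": "1a2b3c4d",
    "time": "2024-01-01T02:30:00Z",
    "datacontenttype": "application/json",
    "subject": "example"
  },
  "data": { ... }
}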
Note It could be useless to build a context filter based on datacontenttype , source and subject as currently they come fixed from event-source: datacontenttype is always application/json source corresponds to eventSourceName specified in the Sensor YAML manifest subject corresponds to eventName specified in the Sensor YAML manifest How it works \u00b6 Specify one or more of the available context fields: apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : with-ctx-filter spec : template : serviceAccountName : operate-workflow-sa dependencies : - name : test-dep eventSourceName : webhook eventName : example filters : context : source : custom-webhook Practical example \u00b6 Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with context filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-context.yaml Send an HTTP request to event-source curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice in sensor logs that the event is invalid as the sensor expects custom-webhook as the value of the source Further examples \u00b6 You can find some examples here .","title":"Context Filter"},{"location":"sensors/filters/ctx/#context-filter","text":"Context filter is applied to the event context. A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } }","title":"Context Filter"},{"location":"sensors/filters/ctx/#fields","text":"Context filter has following fields: filters : context : type : event_type subject : event_subject source : event_source datacontenttype : event_data_content_type You can also specify id, specversion and time fields in the YAML manifest, but they are ignored in filtering. 
Note It could be useless to build a context filter based on datacontenttype , source and subject as currently they come fixed from event-source: datacontenttype is always application/json source corresponds to eventSourceName specified in the Sensor YAML manifest subject corresponds to eventName specified in the Sensor YAML manifest","title":"Fields"},{"location":"sensors/filters/ctx/#how-it-works","text":"Specify one or more of the available context fields: apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : with-ctx-filter spec : template : serviceAccountName : operate-workflow-sa dependencies : - name : test-dep eventSourceName : webhook eventName : example filters : context : source : custom-webhook","title":"How it works"},{"location":"sensors/filters/ctx/#practical-example","text":"Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with context filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-context.yaml Send an HTTP request to event-source curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice in sensor logs that the event is invalid as the sensor expects custom-webhook as the value of the source","title":"Practical example"},{"location":"sensors/filters/ctx/#further-examples","text":"You can find some examples here .","title":"Further examples"},{"location":"sensors/filters/data/","text":"Data Filter \u00b6 Data filters are applied to the event data. A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } } Data filters are applied on data within the payload. Fields \u00b6 A data filter has following fields: filters : dataLogicalOperator : logical_operator_applied data : - path : path_within_event_data type : types_of_the_data comparator : numeric_comparator value : - list_of_possible_values \u26a0\ufe0f PLEASE NOTE order in which data filters are declared corresponds to the order in which the Sensor will evaluate them. Logical operator \u00b6 Data filters can be evaluated together in 2 ways: and , meaning that all data filters returning true are required for an event to be valid or , meaning that only one data filter returning true is enough for an event to be valid Any kind of error is considered as false (e.g. path not existing in event body). Such behaviour can be configured with dataLogicalOperator field in a Sensor dependency filters, e.g. apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : data-filters-example spec : dependencies : - name : sample-dependency eventSourceName : webhook eventName : sample-event filters : dataLogicalOperator : \"or\" data : - path : \"a\" type : \"bool\" value : - \"true\" - path : \"b.c\" type : \"number\" value : - \"3.14\" - path : \"b.d\" type : \"string\" value : - \"hello there\" # ... Available values: \"\" (empty), defaulting to and and , default behaviour or \u26a0\ufe0f PLEASE NOTE Data logical operator values must be lower case . 
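As a worked example of the or behaviour above: the payload {"a": false, "b": {"c": 3.14, "d": "bye"}} would be considered valid, because the b.c filter matches even though the other two do not; with the default and operator the same payload would be rejected, since all three filters would have to match.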
How it works \u00b6 Comparator \u00b6 The data filter offers the following comparators : >= > = != < <= e.g. filters : data : - path : body.value type : number comparator : \">\" value : - \"50.0\" Note : If the data type is string , you can pass either an exact value or a regex; in either case the value will be evaluated as a regex. If the data type is bool or float , you have to pass an exact value. Multiple paths \u00b6 If the HTTP request was less simple and contained multiple paths that you would like to filter against, you can use multipaths to combine multiple data paths in the payload into one string. For a given payload such as: { \"body\" : { \"action\" : \"opened\" , \"labels\" : [ { \"id\" : \"1234\" , \"name\" : \"Webhook\" }, { \"id\" : \"5678\" , \"name\" : \"Approved\" } ] } } We want our sensor to fire if the action is \"opened\" and it has a label of \"Webhook\" or if the action is \"closed\" and it has a label of \"Webhook\" and \"Approved\". The path would look like body.action,body.labels.#(name==\"Webhook\").name,body.labels.#(name==\"Approved\").name This would return a string like: \"opened\",\"Webhook\" or \"closed\",\"Webhook\",\"Approved\" . As the resulting data type will be a string , we can pass a regex over it: filters : data : - path : 'body.action,body.labels.#(name==\"Webhook\").name,body.labels.#(name==\"Approved\").name' type : string value : - '\"opened\",\"Webhook\"' - '\"closed\",\"Webhook\",\"Approved\"' Template \u00b6 template processes the incoming data defined in path through a sprig template before matching it with the value . e.g. filters : data : - path : body.message type : string value : - \"hello world\" template : \"{{ b64dec .Input }}\" The message '{\"message\":\"aGVsbG8gd29ybGQ=\"}' will match the above filter definition. Note : Data type is assumed to be string before applying the template , then cast to the user-defined type for value matching. Practical examples (comparator) \u00b6 Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with data filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-data-simple-1.yaml Send an HTTP request to the event-source curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice in sensor logs that the event is invalid as it expects either hello or hey as the value of body.message Send another HTTP request to the event-source curl -d '{\"message\":\"hello\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Look for a workflow with a name starting with data-workflow-","title":"Data Filter"},{"location":"sensors/filters/data/#data-filter","text":"Data filters are applied to the event data. 
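As a quick check of the comparator example above: a request body of {"value": 60.5} satisfies body.value > "50.0" and passes the filter, while {"value": 49} is rejected; when no comparator (or =) is given, the entries under value are matched directly against the data, and for string data each entry is evaluated as a regex.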
A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } } Data filters are applied on data within the payload.","title":"Data Filter"},{"location":"sensors/filters/data/#fields","text":"A data filter has following fields: filters : dataLogicalOperator : logical_operator_applied data : - path : path_within_event_data type : types_of_the_data comparator : numeric_comparator value : - list_of_possible_values \u26a0\ufe0f PLEASE NOTE order in which data filters are declared corresponds to the order in which the Sensor will evaluate them.","title":"Fields"},{"location":"sensors/filters/data/#logical-operator","text":"Data filters can be evaluated together in 2 ways: and , meaning that all data filters returning true are required for an event to be valid or , meaning that only one data filter returning true is enough for an event to be valid Any kind of error is considered as false (e.g. path not existing in event body). Such behaviour can be configured with dataLogicalOperator field in a Sensor dependency filters, e.g. apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : data-filters-example spec : dependencies : - name : sample-dependency eventSourceName : webhook eventName : sample-event filters : dataLogicalOperator : \"or\" data : - path : \"a\" type : \"bool\" value : - \"true\" - path : \"b.c\" type : \"number\" value : - \"3.14\" - path : \"b.d\" type : \"string\" value : - \"hello there\" # ... Available values: \"\" (empty), defaulting to and and , default behaviour or \u26a0\ufe0f PLEASE NOTE Data logical operator values must be lower case .","title":"Logical operator"},{"location":"sensors/filters/data/#how-it-works","text":"","title":"How it works"},{"location":"sensors/filters/data/#comparator","text":"The data filter offers following comparators : >= > = != < <= e.g. filters : data : - path : body.value type : number comparator : \">\" value : - \"50.0\" Note : If data type is string , you can pass either an exact value or a regex. In any case that value will be evaluated as a regex. If data types is bool or float , you have to pass an exact value.","title":"Comparator"},{"location":"sensors/filters/data/#multiple-paths","text":"If the HTTP request was less simple and contained multiple paths that you would like to filter against, you can use multipaths to combine multiple data paths in the payload into one string. For a given payload such as: { \"body\" : { \"action\" : \"opened\" , \"labels\" : [ { \"id\" : \"1234\" , \"name\" : \"Webhook\" }, { \"id\" : \"5678\" , \"name\" : \"Approved\" } ] } } We want our sensor to fire if the action is \"opened\" and it has a label of \"Webhook\" or if the action is \"closed\" and it has a label of \"Webhook\" and \"Approved\". 
The path would look like body.action,body.labels.#(name==\"Webhook\").name,body.labels.#(name==\"Approved\").name This would return a string like: \"opened\",\"Webhook\" or \"closed\",\"Webhook\",\"Approved\" . As the resulting data type will be a string , we can pass a regex over it: filters : data : - path : 'body.action,body.labels.#(name==\"Webhook\").name,body.labels.#(name==\"Approved\").name' type : string value : - '\"opened\",\"Webhook\"' - '\"closed\",\"Webhook\",\"Approved\"'","title":"Multiple paths"},{"location":"sensors/filters/data/#template","text":"template processes the incoming data defined in path through a sprig template before matching it with the value . e.g. filters : data : - path : body.message type : string value : - \"hello world\" template : \"{{ b64dec .Input }}\" The message '{\"message\":\"aGVsbG8gd29ybGQ=\"}' will match the above filter definition. Note : Data type is assumed to be string before applying the template , then cast to the user-defined type for value matching.","title":"Template"},{"location":"sensors/filters/data/#practical-examples-comparator","text":"Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with data filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-data-simple-1.yaml Send an HTTP request to the event-source curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice in sensor logs that the event is invalid as it expects either hello or hey as the value of body.message Send another HTTP request to the event-source curl -d '{\"message\":\"hello\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Look for a workflow with a name starting with data-workflow-","title":"Practical examples (comparator)"},{"location":"sensors/filters/data/#further-examples","text":"You can find some examples here .","title":"Further examples"},{"location":"sensors/filters/expr/","text":"Expr filter \u00b6 Expr filters are applied to the event data. A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } } Expr filters are applied on data within the payload. Fields \u00b6 An expr filter has the following fields: filters : exprLogicalOperator : logical_operator_applied exprs : - expr : expression_to_evaluate fields : - name : parameter_name path : path_to_parameter_value \u26a0\ufe0f PLEASE NOTE the order in which expr filters are declared corresponds to the order in which the Sensor will evaluate them. Logical operator \u00b6 Expr filters can be evaluated together in 2 ways: and , meaning that all expr filters returning true are required for an event to be valid or , meaning that only one expr filter returning true is enough for an event to be valid Any kind of error is considered false (e.g. path not existing in event body). Such behaviour can be configured with the exprLogicalOperator field in a Sensor dependency's filters, e.g. 
apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : data-filters-example spec : dependencies : - name : sample-dependency eventSourceName : webhook eventName : sample-event filters : exprLogicalOperator : \"or\" exprs : - expr : a == \"b\" || c != 10 fields : - name : a path : a - name : c path : c - expr : e == false fields : - name : e path : d.e # ... Available values: \"\" (empty), defaulting to and and , default behaviour or \u26a0\ufe0f PLEASE NOTE Expr logical operator values must be lower case . How it works \u00b6 The expr field defines the expression to be evaluated. The fields stanza defines name and path of each parameter used in the expression. name is arbitrary and used in the expr , path defines how to find the value in the data payload then to be assigned to a parameter. The expr filter evaluates the expression contained in expr using govaluate . This library leverages an incredible flexibility and power. With govaluate we are able to define complex combination of arithmetic ( - , * , / , ** , % ), negation ( - ), inversion ( ! ), bitwise not ( ~ ), logical ( && , || ), ternary conditional ( ? , : ) operators, together with comparators ( > , < , >= , <= ), comma-separated arrays and custom functions. Here some examples: action =~ \"start\" action == \"end\" && started == true action =~ \"start\" || (started == true && instances == 2) To discover all options offered by govaluate, take a look at its manual . Practical example \u00b6 Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with expr filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-expressions.yaml Send an HTTP request to event-source curl -d '{ \"a\": \"b\", \"c\": 11, \"d\": { \"e\": true } }' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice in sensor logs that the event is invalid as the sensor expects e == false Send another HTTP request to event-source curl -d '{ \"a\": \"b\", \"c\": 11, \"d\": { \"e\": false } }' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Look for a workflow with name starting with expr-workflow- Further examples \u00b6 You can find some examples here .","title":"Expr filter"},{"location":"sensors/filters/expr/#expr-filter","text":"Expr filters are applied to the event data. 
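Tracing the practical example above: for the first payload {"a": "b", "c": 11, "d": {"e": true}}, the expression a == "b" || c != 10 evaluates to true, but the sensor's e == false check evaluates to false, so the event is rejected; in the second payload d.e is false, the check passes, and a workflow named expr-workflow- is created.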
A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } } Expr filters are applied on data within the payload.","title":"Expr filter"},{"location":"sensors/filters/expr/#fields","text":"An expr filter has following fields: filters : exprLogicalOperator : logical_operator_applied exprs : - expr : expression_to_evaluate fields : - name : parameter_name path : path_to_parameter_value \u26a0\ufe0f PLEASE NOTE order in which expr filters are declared corresponds to the order in which the Sensor will evaluate them.","title":"Fields"},{"location":"sensors/filters/expr/#logical-operator","text":"Expr filters can be evaluated together in 2 ways: and , meaning that all expr filters returning true are required for an event to be valid or , meaning that only one expr filter returning true is enough for an event to be valid Any kind of error is considered as false (e.g. path not existing in event body). Such behaviour can be configured with exprLogicalOperator field in a Sensor dependency filters, e.g. apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : data-filters-example spec : dependencies : - name : sample-dependency eventSourceName : webhook eventName : sample-event filters : exprLogicalOperator : \"or\" exprs : - expr : a == \"b\" || c != 10 fields : - name : a path : a - name : c path : c - expr : e == false fields : - name : e path : d.e # ... Available values: \"\" (empty), defaulting to and and , default behaviour or \u26a0\ufe0f PLEASE NOTE Expr logical operator values must be lower case .","title":"Logical operator"},{"location":"sensors/filters/expr/#how-it-works","text":"The expr field defines the expression to be evaluated. The fields stanza defines name and path of each parameter used in the expression. name is arbitrary and used in the expr , path defines how to find the value in the data payload then to be assigned to a parameter. The expr filter evaluates the expression contained in expr using govaluate . This library leverages an incredible flexibility and power. With govaluate we are able to define complex combination of arithmetic ( - , * , / , ** , % ), negation ( - ), inversion ( ! ), bitwise not ( ~ ), logical ( && , || ), ternary conditional ( ? , : ) operators, together with comparators ( > , < , >= , <= ), comma-separated arrays and custom functions. 
Here some examples: action =~ \"start\" action == \"end\" && started == true action =~ \"start\" || (started == true && instances == 2) To discover all options offered by govaluate, take a look at its manual .","title":"How it works"},{"location":"sensors/filters/expr/#practical-example","text":"Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with expr filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-expressions.yaml Send an HTTP request to event-source curl -d '{ \"a\": \"b\", \"c\": 11, \"d\": { \"e\": true } }' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice in sensor logs that the event is invalid as the sensor expects e == false Send another HTTP request to event-source curl -d '{ \"a\": \"b\", \"c\": 11, \"d\": { \"e\": false } }' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Look for a workflow with name starting with expr-workflow-","title":"Practical example"},{"location":"sensors/filters/expr/#further-examples","text":"You can find some examples here .","title":"Further examples"},{"location":"sensors/filters/intro/","text":"Introduction \u00b6 Filters provide a powerful mechanism to apply constraints on the events in order to determine a validity. If filters determine an event is valid, this will trigger the action defined by the Sensor. If filters determine an event is not valid, this won't trigger any action. Types \u00b6 Argo Events offers 5 types of filters: Expr Filter Data Filter Script Filter Context Filter Time Filter \u26a0\ufe0f PLEASE NOTE this is the order in which Sensor evaluates filter types: expr, data, context, time. Logical operator \u00b6 Filter types can be evaluated together in 2 ways: and , meaning that all filters returning true are required for an event to be valid or , meaning that only one filter returning true is enough for an event to be valid Any kind of filter error is considered as false (e.g. path not existing in event body). Such behaviour can be configured with filtersLogicalOperator field in a Sensor dependency, e.g. apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : multiple-filters-example spec : dependencies : - name : sample-dependency eventSourceName : webhook eventName : sample-event filtersLogicalOperator : \"or\" filters : # ... Available values: \"\" (empty), defaulting to and and , default behaviour or \u26a0\ufe0f PLEASE NOTE Logical operator values must be lower case . Examples \u00b6 You can find some examples here .","title":"Introduction"},{"location":"sensors/filters/intro/#introduction","text":"Filters provide a powerful mechanism to apply constraints on the events in order to determine a validity. If filters determine an event is valid, this will trigger the action defined by the Sensor. 
If filters determine an event is not valid, this won't trigger any action.","title":"Introduction"},{"location":"sensors/filters/intro/#types","text":"Argo Events offers 5 types of filters: Expr Filter Data Filter Script Filter Context Filter Time Filter \u26a0\ufe0f PLEASE NOTE this is the order in which Sensor evaluates filter types: expr, data, context, time.","title":"Types"},{"location":"sensors/filters/intro/#logical-operator","text":"Filter types can be evaluated together in 2 ways: and , meaning that all filters returning true are required for an event to be valid or , meaning that only one filter returning true is enough for an event to be valid Any kind of filter error is considered as false (e.g. path not existing in event body). Such behaviour can be configured with filtersLogicalOperator field in a Sensor dependency, e.g. apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : multiple-filters-example spec : dependencies : - name : sample-dependency eventSourceName : webhook eventName : sample-event filtersLogicalOperator : \"or\" filters : # ... Available values: \"\" (empty), defaulting to and and , default behaviour or \u26a0\ufe0f PLEASE NOTE Logical operator values must be lower case .","title":"Logical operator"},{"location":"sensors/filters/intro/#examples","text":"You can find some examples here .","title":"Examples"},{"location":"sensors/filters/script/","text":"Script filter \u00b6 Script filters can be used to filter the events with LUA scripts. Script filters are applied to the event data . A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {} } } Fields \u00b6 An Script filter can be defined under filters with a field script : filters : script : |- if event.body.a == \"b\" and event.body.d.e == \"z\" then return true else return false end Practical example \u00b6 Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with context filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-script.yaml Send an HTTP request to the event-source kubectl port-forward svc/webhook-eventsource-svc 12000 curl -d '{\"hello\": \"world\"}' -X POST http://localhost:12000/example You will notice in sensor logs that the event did not trigger anything. Send another HTTP request the event-source curl -X POST -d '{\"a\": \"b\", \"d\": {\"e\": \"z\"}}' http://localhost:12000/example Then you will see the event successfully triggered a workflow creation.","title":"Script filter"},{"location":"sensors/filters/script/#script-filter","text":"Script filters can be used to filter the events with LUA scripts. Script filters are applied to the event data . 
A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {} } } Fields \u00b6 A Script filter can be defined under filters with a script field: filters : script : |- if event.body.a == \"b\" and event.body.d.e == \"z\" then return true else return false end Practical example \u00b6 Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with script filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-script.yaml Send an HTTP request to the event-source kubectl port-forward svc/webhook-eventsource-svc 12000 curl -d '{\"hello\": \"world\"}' -X POST http://localhost:12000/example You will notice in sensor logs that the event did not trigger anything. Send another HTTP request to the event-source curl -X POST -d '{\"a\": \"b\", \"d\": {\"e\": \"z\"}}' http://localhost:12000/example Then you will see the event successfully triggered a workflow creation.","title":"Script filter"},{"location":"sensors/filters/script/#script-filter","text":"Script filters can be used to filter the events with Lua scripts. Script filters are applied to the event data . 
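Walking through the Lua script above: for the first payload {"hello": "world"}, event.body.a is nil, so event.body.a == "b" is false, the and short-circuits, the script returns false and the event is dropped; for the second payload both comparisons hold, the script returns true and the trigger fires.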
00:00:00 00:00:00 00:00:00 \u2503 start stop \u2503 start stop \u2503 \u2500\u2538\u2500\u2500\u2500\u2500\u2500\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cb\u2500\u2500\u2500\u2500\u2500\u2538\u2500\u2500\u2500\u2500\u2500\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cb\u2500\u2500\u2500\u2500\u2500\u2538\u2500 \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 OK \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 OK \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f if stop < start : event time must be in [start, stop@Next day) (this is equivalent to: event time must be in [00:00:00, stop) || [start, 00:00:00@Next day) ). 00:00:00 00:00:00 00:00:00 \u2503 stop start \u2503 stop start \u2503 \u2500\u2538\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cb\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2538\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cb\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2538\u2500 \u2500\u2500\u2500 OK \u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 OK \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2570\u2500\u2500\u2500\u2500\u2500\u2500 OK \u2500\u2500\u2500 Practical example \u00b6 Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with time filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-time.yaml Send an HTTP request to event-source curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice one of following behaviours: if you run this example between 02:30 and 04:30, the sensor logs the event is valid if you run this example outside time range between 02:30 and 04:30, the sensor logs the event is invalid Further examples \u00b6 You can find some examples here .","title":"Time Filter"},{"location":"sensors/filters/time/#time-filter","text":"Time filter is applied to the event time, contained in the event context. 
A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } } It filters out events occurring outside the specified time range, so it is specially helpful when you need to make sure an event occurs between a certain time-frame.","title":"Time Filter"},{"location":"sensors/filters/time/#fields","text":"Time filter has following fields: filters : time : start : time_range_start_utc stop : time_range_end_utc","title":"Fields"},{"location":"sensors/filters/time/#how-it-works","text":"Time filter takes a start and stop time in HH:MM:SS format in UTC. apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : with-time-filter spec : template : serviceAccountName : operate-workflow-sa dependencies : - name : test-dep eventSourceName : webhook eventName : example filters : time : start : \"02:30:00\" stop : \"04:30:00\" If stop is smaller than start ( stop < start ), the stop time is treated as next day of start . Note : start is inclusive while stop is exclusive.","title":"How it works"},{"location":"sensors/filters/time/#time-filter-behaviour-visually-explained","text":"if start < stop : event time must be in [start, stop) . 00:00:00 00:00:00 00:00:00 \u2503 start stop \u2503 start stop \u2503 \u2500\u2538\u2500\u2500\u2500\u2500\u2500\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cb\u2500\u2500\u2500\u2500\u2500\u2538\u2500\u2500\u2500\u2500\u2500\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cb\u2500\u2500\u2500\u2500\u2500\u2538\u2500 \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 OK \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 OK \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f if stop < start : event time must be in [start, stop@Next day) (this is equivalent to: event time must be in [00:00:00, stop) || [start, 00:00:00@Next day) ). 
00:00:00 00:00:00 00:00:00 \u2503 stop start \u2503 stop start \u2503 \u2500\u2538\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cb\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2538\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cb\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2538\u2500 \u2500\u2500\u2500 OK \u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 OK \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2570\u2500\u2500\u2500\u2500\u2500\u2500 OK \u2500\u2500\u2500","title":"Time filter behaviour visually explained"},{"location":"sensors/filters/time/#practical-example","text":"Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with time filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-time.yaml Send an HTTP request to event-source curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice one of following behaviours: if you run this example between 02:30 and 04:30, the sensor logs the event is valid if you run this example outside time range between 02:30 and 04:30, the sensor logs the event is invalid","title":"Practical example"},{"location":"sensors/filters/time/#further-examples","text":"You can find some examples here .","title":"Further examples"},{"location":"sensors/triggers/argo-workflow/","text":"Argo Workflow Trigger \u00b6 Argo workflow is K8s custom resource which help orchestrating parallel jobs on Kubernetes. Trigger a workflow \u00b6 Note: You will need to have Argo Workflows installed to make this work. Make sure to have the eventbus deployed in the namespace. We will use webhook event-source and sensor to trigger an Argo workflow. Set up the operate-workflow-sa service account that the sensor will use kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/rbac/sensor-rbac.yaml Let's set up a webhook event-source to process incoming requests. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create the sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/special-workflow-trigger-shortened.yaml Let's expose the webhook event-source pod using port-forward so that we can make a request to it. kubectl -n argo-events port-forward 12000:12000 Use either Curl or Postman to send a post request to the http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example List the workflow using argo list . Parameterization \u00b6 Similar to other type of triggers, sensor offers parameterization for the Argo workflow trigger. Parameterization is specially useful when you want to define a generic trigger template in the sensor and populate the workflow object values on the fly. You can learn more about trigger parameterization here . 
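For illustration, here is a minimal sketch of a parameterized argoWorkflow trigger, patterned on the linked example; the test-dep dependency and the whalesay workflow below are assumptions for the sketch, not the exact contents of the linked manifest:

triggers:
  - template:
      name: argo-workflow-trigger
      argoWorkflow:
        operation: submit
        source:
          resource:
            apiVersion: argoproj.io/v1alpha1
            kind: Workflow
            metadata:
              generateName: webhook-
            spec:
              entrypoint: whalesay
              arguments:
                parameters:
                  - name: message
                    value: hello world  # overridden by the parameter below
              templates:
                - name: whalesay
                  container:
                    image: docker/whalesay:latest
                    command: [cowsay]
                    args: [\"{{workflow.parameters.message}}\"]
        # applied to the source resource before it is submitted
        parameters:
          - src:
              dependencyName: test-dep
              dataKey: body.message
            dest: spec.arguments.parameters.0.value

With this in place, the sensor copies body.message from the incoming event into the workflow's message argument before submitting it.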
Policy \u00b6 Trigger policy helps you determine the status of the triggered Argo workflow object and decide whether to stop or continue the sensor. Take a look at K8s Trigger Policy . Argo CLI \u00b6 In addition to the example above, you can leverage other functionalities provided by the Argo CLI such as: Submit Submit --from Resubmit Resume Retry Suspend Terminate Stop To make use of Argo CLI operations in the argoWorkflow trigger template, argoWorkflow: operation: submit # submit, submit-from, resubmit, resume, retry, suspend, terminate or stop A complete example is available here .","title":"Argo Workflow Trigger"},{"location":"sensors/triggers/argo-workflow/#argo-workflow-trigger","text":"Argo Workflow is a K8s custom resource which helps orchestrate parallel jobs on Kubernetes.","title":"Argo Workflow Trigger"},{"location":"sensors/triggers/argo-workflow/#trigger-a-workflow","text":"Note: You will need to have Argo Workflows installed to make this work. Make sure to have the eventbus deployed in the namespace. We will use a webhook event-source and sensor to trigger an Argo workflow. Set up the operate-workflow-sa service account that the sensor will use. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/rbac/sensor-rbac.yaml Let's set up a webhook event-source to process incoming requests. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create the sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/special-workflow-trigger-shortened.yaml Let's expose the webhook event-source pod using port-forward so that we can make a request to it. kubectl -n argo-events port-forward <event-source-pod-name> 12000:12000 Use either Curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example List the workflow using argo list .","title":"Trigger a workflow"},{"location":"sensors/triggers/argo-workflow/#parameterization","text":"Similar to other types of triggers, the sensor offers parameterization for the Argo workflow trigger. Parameterization is especially useful when you want to define a generic trigger template in the sensor and populate the workflow object values on the fly. You can learn more about trigger parameterization here .","title":"Parameterization"},{"location":"sensors/triggers/argo-workflow/#policy","text":"Trigger policy helps you determine the status of the triggered Argo workflow object and decide whether to stop or continue the sensor. Take a look at K8s Trigger Policy .","title":"Policy"},{"location":"sensors/triggers/argo-workflow/#argo-cli","text":"In addition to the example above, you can leverage other functionalities provided by the Argo CLI such as: Submit Submit --from Resubmit Resume Retry Suspend Terminate Stop To make use of Argo CLI operations in the argoWorkflow trigger template, argoWorkflow: operation: submit # submit, submit-from, resubmit, resume, retry, suspend, terminate or stop A complete example is available here .","title":"Argo CLI"},{"location":"sensors/triggers/aws-lambda/","text":"AWS Lambda \u00b6 AWS Lambda provides tremendous value, but event-driven lambda invocation is limited to SNS, SQS and a few other event sources. Argo Events makes it easy to integrate lambda with event sources that are not native to AWS. 
Trigger A Simple Lambda \u00b6 Make sure to have eventbus deployed in the namespace. Make sure your AWS account has permissions to execute Lambda. More info on AWS permissions is available here . Fetch your access and secret key for AWS account and base64 encode them. Create a secret called aws-secret as follows. apiVersion : v1 kind : Secret metadata : name : aws - secret type : Opaque data : accesskey : < base64 - access - key > secretkey : < base64 - secret - key > Create a basic lambda function called hello either using AWS cli or console. exports . handler = async ( event , context ) => { console . log ( 'name =' , event . name ); return event . name ; }; Let's set up webhook event-source to invoke the lambda over http requests. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Let's expose the webhook event-source using port-forward so that we can make a request to it. kubectl -n argo-events port-forward 12000:12000 Deploy the webhook sensor with AWS Lambda trigger. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/aws-lambda-trigger.yaml Once the sensor pod is in running state, make a curl request to webhook event-source pod, curl -d '{\"name\":\"foo\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example It will trigger the AWS Lambda function hello . Look at the CloudWatch logs to verify. Specification \u00b6 The AWS Lambda trigger specification is available here . Request Payload \u00b6 Invoking the AWS Lambda without a request payload would not be very useful. The lambda trigger within a sensor is invoked when sensor receives an event from the eventbus. In order to construct a request payload based on the event data, sensor offers payload field as a part of the lambda trigger. Let's examine a lambda trigger, awsLambda : functionName : hello accessKey : name : aws - secret key : accesskey secretKey : name : aws - secret key : secretkey namespace : argo - events region : us - east - 1 payload : - src : dependencyName : test - dep dataKey : body . name dest : name The payload contains the list of src which refers to the source event and dest which refers to destination key within result request payload. The payload declared above will generate a request payload like below, { \"name\": \"foo\" // name field from event data } The above payload will be passed in the request to invoke the AWS lambda. You can add however many number of src and dest under payload . Note : Take a look at Parameterization in order to understand how to extract particular key-value from event data. Parameterization \u00b6 Similar to other type of triggers, sensor offers parameterization for the AWS Lambda trigger. Parameterization is specially useful when you want to define a generic trigger template in the sensor and populate values like function name, payload values on the fly. Consider a scenario where you don't want to hard-code the function name and let the event data populate it. awsLambda : functionName : hello // this will be replaced . accessKey : name : aws - secret key : accesskey secretKey : name : aws - secret key : secretkey namespace : argo - events region : us - east - 1 payload : - src : dependencyName : test - dep dataKey : body . message dest : message parameters : - src : dependencyName : test - dep dataKey : body . 
function_name dest : functionName With parameters the sensor will replace the function name hello with the value of field function_name from event data. You can learn more about trigger parameterization here . Policy \u00b6 Trigger policy helps you determine the status of the lambda invocation and decide whether to stop or continue sensor. To determine whether the lambda was successful or not, Lambda trigger provides a Status policy. The Status holds a list of response statuses that are considered valid. awsLambda : functionName : hello accessKey : name : aws - secret key : accesskey secretKey : name : aws - secret key : secretkey namespace : argo - events region : us - east - 1 payload : - src : dependencyName : test - dep dataKey : body . message dest : message policy : status : allow : - 200 - 201 The above lambda trigger will be treated successful only if its invocation returns with either 200 or 201 status.","title":"AWS Lambda"},{"location":"sensors/triggers/aws-lambda/#aws-lambda","text":"AWS Lambda provides a tremendous value, but the event driven lambda invocation is limited to SNS, SQS and few other event sources. Argo Events makes it easy to integrate lambda with event sources that are not native to AWS.","title":"AWS Lambda"},{"location":"sensors/triggers/aws-lambda/#trigger-a-simple-lambda","text":"Make sure to have eventbus deployed in the namespace. Make sure your AWS account has permissions to execute Lambda. More info on AWS permissions is available here . Fetch your access and secret key for AWS account and base64 encode them. Create a secret called aws-secret as follows. apiVersion : v1 kind : Secret metadata : name : aws - secret type : Opaque data : accesskey : < base64 - access - key > secretkey : < base64 - secret - key > Create a basic lambda function called hello either using AWS cli or console. exports . handler = async ( event , context ) => { console . log ( 'name =' , event . name ); return event . name ; }; Let's set up webhook event-source to invoke the lambda over http requests. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Let's expose the webhook event-source using port-forward so that we can make a request to it. kubectl -n argo-events port-forward 12000:12000 Deploy the webhook sensor with AWS Lambda trigger. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/aws-lambda-trigger.yaml Once the sensor pod is in running state, make a curl request to webhook event-source pod, curl -d '{\"name\":\"foo\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example It will trigger the AWS Lambda function hello . Look at the CloudWatch logs to verify.","title":"Trigger A Simple Lambda"},{"location":"sensors/triggers/aws-lambda/#specification","text":"The AWS Lambda trigger specification is available here .","title":"Specification"},{"location":"sensors/triggers/aws-lambda/#request-payload","text":"Invoking the AWS Lambda without a request payload would not be very useful. The lambda trigger within a sensor is invoked when sensor receives an event from the eventbus. In order to construct a request payload based on the event data, sensor offers payload field as a part of the lambda trigger. 
Let's examine a lambda trigger, awsLambda : functionName : hello accessKey : name : aws - secret key : accesskey secretKey : name : aws - secret key : secretkey namespace : argo - events region : us - east - 1 payload : - src : dependencyName : test - dep dataKey : body . name dest : name The payload contains the list of src which refers to the source event and dest which refers to destination key within result request payload. The payload declared above will generate a request payload like below, { \"name\": \"foo\" // name field from event data } The above payload will be passed in the request to invoke the AWS lambda. You can add however many number of src and dest under payload . Note : Take a look at Parameterization in order to understand how to extract particular key-value from event data.","title":"Request Payload"},{"location":"sensors/triggers/aws-lambda/#parameterization","text":"Similar to other type of triggers, sensor offers parameterization for the AWS Lambda trigger. Parameterization is specially useful when you want to define a generic trigger template in the sensor and populate values like function name, payload values on the fly. Consider a scenario where you don't want to hard-code the function name and let the event data populate it. awsLambda : functionName : hello // this will be replaced . accessKey : name : aws - secret key : accesskey secretKey : name : aws - secret key : secretkey namespace : argo - events region : us - east - 1 payload : - src : dependencyName : test - dep dataKey : body . message dest : message parameters : - src : dependencyName : test - dep dataKey : body . function_name dest : functionName With parameters the sensor will replace the function name hello with the value of field function_name from event data. You can learn more about trigger parameterization here .","title":"Parameterization"},{"location":"sensors/triggers/aws-lambda/#policy","text":"Trigger policy helps you determine the status of the lambda invocation and decide whether to stop or continue sensor. To determine whether the lambda was successful or not, Lambda trigger provides a Status policy. The Status holds a list of response statuses that are considered valid. awsLambda : functionName : hello accessKey : name : aws - secret key : accesskey secretKey : name : aws - secret key : secretkey namespace : argo - events region : us - east - 1 payload : - src : dependencyName : test - dep dataKey : body . message dest : message policy : status : allow : - 200 - 201 The above lambda trigger will be treated successful only if its invocation returns with either 200 or 201 status.","title":"Policy"},{"location":"sensors/triggers/azure-event-hubs/","text":"Azure Event Hubs \u00b6 Azure Event Hubs Trigger allows a sensor to publish events to Azure Event Hubs . Argo Events integrates with Azure Event Hubs to stream data from an EventSource NOTE: Parametrization for fqdn and hubName values are not yet supported. Specification \u00b6 The Azure Event Hubs trigger specification is available here . Send an Event to Azure Event Hubs \u00b6 Make sure to have the eventbus deployed in the namespace. Create an event hub . Make sure that the Shared Access Key used to connect to Azure Event Hubs has the Send policy. Get the Primary Key of the Shared Access Policy, the Name of the Shared Access Policy, the Hub Name , and the FQDN of the Azure Event Hubs Namespace. 
Create a secret called azure-event-hubs-secret as follows: NOTE: sharedAccessKey refers to the Primary Key and sharedAccessKeyName refers to the Name of the Shared Access Policy. apiVersion : v1 kind : Secret metadata : name : azure - event - hubs - secret type : Opaque data : sharedAccessKey : < base64 - shared - access - key > sharedAccessKeyName : < base64 - shared - access - key - name > Let's set up a webhook event-source to process incoming requests. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create the sensor with the following template. Replace the necessary values for fqdn and hubName : apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : azure - events - hub spec : dependencies : - name : test - dep eventSourceName : webhook eventName : example triggers : - template : name : azure - eventhubs - trigger azureEventHubs : # FQDN of the EventsHub namespace you created # More info at https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string fqdn : eventhubs_fqdn sharedAccessKeyName : name : azure - event - hubs - secret key : sharedAccessKeyName sharedAccessKey : name : azure - event - hubs - secret key : sharedAccessKey # Event Hub path/name hubName : hub_name payload : - src : dependencyName : test - dep dataKey : body . message dest : message The Event needs a body. In order to construct a messaged based on your event data, the Azure Event Hubs sensor has the payload field as part of the trigger. The payload contains the list of src which refers to the source events and dest which refers to destination key within the resulting request payload. The payload declared above will generate a message body like below, { \"message\": \"some message here\" // name/key of the object } Let's expose the webhook event-source pod using port-forward so that we can make a request to it. kubectl -n argo-events port-forward 12000:12000 Use either Curl or Postman to send a post request to the http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Verify Events have been in ingested in Azure Events Hub by creating a listener app or following other code samples . You can optionally create an Azure Event Hubs Event Source .","title":"Azure Event Hubs"},{"location":"sensors/triggers/azure-event-hubs/#azure-event-hubs","text":"Azure Event Hubs Trigger allows a sensor to publish events to Azure Event Hubs . Argo Events integrates with Azure Event Hubs to stream data from an EventSource NOTE: Parametrization for fqdn and hubName values are not yet supported.","title":"Azure Event Hubs"},{"location":"sensors/triggers/azure-event-hubs/#specification","text":"The Azure Event Hubs trigger specification is available here .","title":"Specification"},{"location":"sensors/triggers/azure-event-hubs/#send-an-event-to-azure-event-hubs","text":"Make sure to have the eventbus deployed in the namespace. Create an event hub . Make sure that the Shared Access Key used to connect to Azure Event Hubs has the Send policy. Get the Primary Key of the Shared Access Policy, the Name of the Shared Access Policy, the Hub Name , and the FQDN of the Azure Event Hubs Namespace. Create a secret called azure-event-hubs-secret as follows: NOTE: sharedAccessKey refers to the Primary Key and sharedAccessKeyName refers to the Name of the Shared Access Policy. 
apiVersion : v1 kind : Secret metadata : name : azure - event - hubs - secret type : Opaque data : sharedAccessKey : < base64 - shared - access - key > sharedAccessKeyName : < base64 - shared - access - key - name > Let's set up a webhook event-source to process incoming requests. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create the sensor with the following template. Replace the necessary values for fqdn and hubName : apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : azure - events - hub spec : dependencies : - name : test - dep eventSourceName : webhook eventName : example triggers : - template : name : azure - eventhubs - trigger azureEventHubs : # FQDN of the EventsHub namespace you created # More info at https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string fqdn : eventhubs_fqdn sharedAccessKeyName : name : azure - event - hubs - secret key : sharedAccessKeyName sharedAccessKey : name : azure - event - hubs - secret key : sharedAccessKey # Event Hub path/name hubName : hub_name payload : - src : dependencyName : test - dep dataKey : body . message dest : message The Event needs a body. In order to construct a messaged based on your event data, the Azure Event Hubs sensor has the payload field as part of the trigger. The payload contains the list of src which refers to the source events and dest which refers to destination key within the resulting request payload. The payload declared above will generate a message body like below, { \"message\": \"some message here\" // name/key of the object } Let's expose the webhook event-source pod using port-forward so that we can make a request to it. kubectl -n argo-events port-forward 12000:12000 Use either Curl or Postman to send a post request to the http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Verify Events have been in ingested in Azure Events Hub by creating a listener app or following other code samples . You can optionally create an Azure Event Hubs Event Source .","title":"Send an Event to Azure Event Hubs"},{"location":"sensors/triggers/azure-service-bus/","text":"Azure Service Bus \u00b6 Service Bus Trigger allows a sensor to send messages to Azure Service Bus queues and topics. Specification \u00b6 The Azure Service Bus trigger specification is available here . Setup \u00b6 Create a queue called test either using Azure CLI or Azure Service Bus management console. Fetch your connection string for Azure Service Bus and base64 encode it. Create a secret called azure-secret as follows. apiVersion : v1 kind : Secret metadata : name : azure - secret type : Opaque data : connectionstring : < base64 - connection - string > Deploy the secret. kubectl -n argo-events apply -f azure-secret.yaml Let's set up a webhook event-source to process incoming requests. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/azure-service-bus-sensor.yaml The Service Bus message needs a body. In order to construct a messaged based on your event data, the Azure Service Bus sensor has the payload field as part of the trigger. 
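The trigger in the linked azure-service-bus-sensor.yaml is declared roughly like the sketch below; the queue name test and the secret keys mirror the setup steps above, but treat the exact field layout as an assumption and check the linked manifest for the authoritative version:

azureServiceBus:
  # queue created in the setup step
  queueName: test
  connectionString:
    name: azure-secret
    key: connectionstring
  payload:
    - src:
        dependencyName: test-dep
        dataKey: body.message
      dest: message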
The payload declared above will generate a message body like below, { \"message\": \"some message here\" // name/key of the object } Let's expose the webhook event-source pod using port-forward so that we can make a request to it. kubectl -n argo-events port-forward 12000:12000 Use either Curl or Postman to send a post request to the http://localhost:12000/example. curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example","title":"Azure Service Bus"},{"location":"sensors/triggers/azure-service-bus/#azure-service-bus","text":"Service Bus Trigger allows a sensor to send messages to Azure Service Bus queues and topics.","title":"Azure Service Bus"},{"location":"sensors/triggers/azure-service-bus/#specification","text":"The Azure Service Bus trigger specification is available here .","title":"Specification"},{"location":"sensors/triggers/azure-service-bus/#setup","text":"Create a queue called test either using Azure CLI or Azure Service Bus management console. Fetch your connection string for Azure Service Bus and base64 encode it. Create a secret called azure-secret as follows. apiVersion : v1 kind : Secret metadata : name : azure - secret type : Opaque data : connectionstring : < base64 - connection - string > Deploy the secret. kubectl -n argo-events apply -f azure-secret.yaml Let's set up a webhook event-source to process incoming requests. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/azure-service-bus-sensor.yaml The Service Bus message needs a body. In order to construct a messaged based on your event data, the Azure Service Bus sensor has the payload field as part of the trigger. The payload declared above will generate a message body like below, { \"message\": \"some message here\" // name/key of the object } Let's expose the webhook event-source pod using port-forward so that we can make a request to it. kubectl -n argo-events port-forward 12000:12000 Use either Curl or Postman to send a post request to the http://localhost:12000/example. curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example","title":"Setup"},{"location":"sensors/triggers/build-your-own-trigger/","text":"Build Your Own Trigger \u00b6 Argo Events supports a variety of triggers out of box like Argo Workflow, K8s Objects, AWS Lambda, HTTP Requests etc., but you may want to write your own logic to trigger a pipeline or create an object in K8s cluster. An example would be to trigger TektonCD or AirFlow pipelines on GitHub events. Custom Trigger \u00b6 In order to plug your own implementation for a trigger with Argo Events Sensor, you need to run a gRPC server that implements the interface that the sensor expects. Interface \u00b6 The interface exposed via proto file, // Trigger offers services to build a custom trigger service Trigger { // FetchResource fetches the resource to be triggered. rpc FetchResource ( FetchResourceRequest ) returns ( FetchResourceResponse ); // Execute executes the requested trigger resource. rpc Execute ( ExecuteRequest ) returns ( ExecuteResponse ); // ApplyPolicy applies policies on the trigger execution result. rpc ApplyPolicy ( ApplyPolicyRequest ) returns ( ApplyPolicyResponse ); } The complete proto file is available here . 
Let's walk through the contract, FetchResource : If the trigger server needs to fetch a resource from external sources like S3, Git or a URL, this is the place to do so. e.g. if the trigger server aims to invoke a TektonCD pipeline and the PipelineRun resource lives on Git, then trigger server can first fetch it from Git and return it back to sensor. Execute : In this method, the trigger server executes/invokes the trigger. e.g. TektonCD pipeline resource being created in K8s cluster. ApplyPolicy : This is where your trigger implementation can check whether the triggered resource transitioned into the success state. Depending upon the response from the trigger server, the sensor will either stop processing subsequent triggers, or it will continue to process them. How to define the Custom Trigger in a sensor? \u00b6 Let's look at the following sensor, apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : webhook - sensor spec : dependencies : - name : test - dep eventSourceName : webhook eventName : example triggers : - template : name : webhook - workflow - trigger custom : # the url of the trigger server. serverURL : tekton - trigger . argo - events . svc : 9000 # spec is map of string->string and it is sent over to trigger server. # the spec can be anything you want as per your use-case, just make sure the trigger server understands the spec map. spec : url : \"https://raw.githubusercontent.com/VaibhavPage/tekton-cd-trigger/master/example.yaml\" # These parameters are applied on resource fetched and returned by the trigger server. # e.g. consider a trigger server which invokes TektonCD pipeline runs, then # the trigger server can return a TektonCD PipelineRun resource. # The parameters are then applied on that PipelineRun resource. parameters : - src : dependencyName : test - dep dataKey : body . namespace dest : metadata . namespace # These parameters are applied on entire template body. # So that you can parameterize anything under `custom` key such as `serverURL`, `spec` etc. parameters : - src : dependencyName : test - dep dataKey : body . url dest : custom . spec . url The sensor definition should look familiar to you. The only difference is the custom key under triggers -> template . The specification under custom key defines the custom trigger. The most important fields are, serverURL : This is the URL of the trigger gRPC server. spec : It is a map of string -> string. The spec can be anything you want as per your use-case. The sensor sends the spec to trigger server, and it is upto the trigger gRPC server to interpret the spec. parameters : The parameters override the resource that is fetched by the trigger server. Read more info on parameters here . payload : Payload to send to the trigger server. Read more on payload here . The complete spec for the custom trigger is available here . Custom Trigger in Action \u00b6 Refer to a sample trigger server that invokes TektonCD pipeline on events.","title":"Build Your Own Trigger"},{"location":"sensors/triggers/build-your-own-trigger/#build-your-own-trigger","text":"Argo Events supports a variety of triggers out of box like Argo Workflow, K8s Objects, AWS Lambda, HTTP Requests etc., but you may want to write your own logic to trigger a pipeline or create an object in K8s cluster. 
An example would be to trigger TektonCD or AirFlow pipelines on GitHub events.","title":"Build Your Own Trigger"},{"location":"sensors/triggers/build-your-own-trigger/#custom-trigger","text":"In order to plug your own implementation for a trigger with Argo Events Sensor, you need to run a gRPC server that implements the interface that the sensor expects.","title":"Custom Trigger"},{"location":"sensors/triggers/build-your-own-trigger/#interface","text":"The interface exposed via proto file, // Trigger offers services to build a custom trigger service Trigger { // FetchResource fetches the resource to be triggered. rpc FetchResource ( FetchResourceRequest ) returns ( FetchResourceResponse ); // Execute executes the requested trigger resource. rpc Execute ( ExecuteRequest ) returns ( ExecuteResponse ); // ApplyPolicy applies policies on the trigger execution result. rpc ApplyPolicy ( ApplyPolicyRequest ) returns ( ApplyPolicyResponse ); } The complete proto file is available here . Let's walk through the contract, FetchResource : If the trigger server needs to fetch a resource from external sources like S3, Git or a URL, this is the place to do so. e.g. if the trigger server aims to invoke a TektonCD pipeline and the PipelineRun resource lives on Git, then trigger server can first fetch it from Git and return it back to sensor. Execute : In this method, the trigger server executes/invokes the trigger. e.g. TektonCD pipeline resource being created in K8s cluster. ApplyPolicy : This is where your trigger implementation can check whether the triggered resource transitioned into the success state. Depending upon the response from the trigger server, the sensor will either stop processing subsequent triggers, or it will continue to process them.","title":"Interface"},{"location":"sensors/triggers/build-your-own-trigger/#how-to-define-the-custom-trigger-in-a-sensor","text":"Let's look at the following sensor, apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : webhook - sensor spec : dependencies : - name : test - dep eventSourceName : webhook eventName : example triggers : - template : name : webhook - workflow - trigger custom : # the url of the trigger server. serverURL : tekton - trigger . argo - events . svc : 9000 # spec is map of string->string and it is sent over to trigger server. # the spec can be anything you want as per your use-case, just make sure the trigger server understands the spec map. spec : url : \"https://raw.githubusercontent.com/VaibhavPage/tekton-cd-trigger/master/example.yaml\" # These parameters are applied on resource fetched and returned by the trigger server. # e.g. consider a trigger server which invokes TektonCD pipeline runs, then # the trigger server can return a TektonCD PipelineRun resource. # The parameters are then applied on that PipelineRun resource. parameters : - src : dependencyName : test - dep dataKey : body . namespace dest : metadata . namespace # These parameters are applied on entire template body. # So that you can parameterize anything under `custom` key such as `serverURL`, `spec` etc. parameters : - src : dependencyName : test - dep dataKey : body . url dest : custom . spec . url The sensor definition should look familiar to you. The only difference is the custom key under triggers -> template . The specification under custom key defines the custom trigger. The most important fields are, serverURL : This is the URL of the trigger gRPC server. spec : It is a map of string -> string. 
The spec can be anything you want as per your use-case. The sensor sends the spec to the trigger server, and it is up to the trigger gRPC server to interpret the spec. parameters : The parameters override the resource that is fetched by the trigger server. Read more info on parameters here . payload : Payload to send to the trigger server. Read more on payload here . The complete spec for the custom trigger is available here .","title":"How to define the Custom Trigger in a sensor?"},{"location":"sensors/triggers/build-your-own-trigger/#custom-trigger-in-action","text":"Refer to a sample trigger server that invokes a TektonCD pipeline on events.","title":"Custom Trigger in Action"},{"location":"sensors/triggers/email-trigger/","text":"Email Trigger \u00b6 The Email trigger is used to send a custom email to a desired set of email addresses using an SMTP server. The intended use is notifications for a build pipeline, but it can be used for any notification scenario. Prerequisite \u00b6 Deploy the eventbus in the namespace. Have an SMTP server set up. Create a Kubernetes secret with the SMTP password in your cluster. kubectl create secret generic smtp-secret --from-literal=password=$SMTP_PASSWORD Note : If your SMTP server does not require authentication, this step can be skipped. Create a webhook event-source. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Set up port-forwarding to expose the http server. We will use port-forwarding here. kubectl port-forward -n argo-events <event-source-pod-name> 12000:12000 Email Trigger \u00b6 Let's say we want to send an email to a dynamic recipient using a custom email body template. The custom email body template we are going to use is the following: Hi {{.name}}, Hello There Thanks, Obi where {{.name}} has to be substituted with the receiver name from the event. Create a sensor with the Email trigger. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/email-trigger.yaml Note : Please update email.port , email.host and email.username to that of your SMTP server. If your SMTP server does not require authentication, the email.username and email.smtpPassword should be omitted. Send an HTTP request to the event-source pod to fire the Email trigger. curl -d '{\"name\":\"Luke\", \"to\":\"your@email.com\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Note : You can modify the value for key \"to\" to send the email to your address. Alternatively, you can skip providing the \"to\" in the payload to send an email to the static email address provided in the trigger. curl -d '{\"name\":\"Luke\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Note : You have to remove the parameterization for email.to.0 and add email.to like so: email : ... to : - target1@email.com - target2@email.com ... Parameterization \u00b6 We can parameterize the to, from, subject and body of the email trigger for dynamic capabilities. The email trigger parameters have the following structure, - parameters: - src: dependencyName: test-dep dataKey: body.to dest: email.to.0 - src: dependencyName: test-dep dataKey: body.to dest: email.to.-1 - src: dependencyName: test-dep dataKey: body.from dest: email.from - src: dependencyName: test-dep dataKey: body.subject dest: email.subject - src: dependencyName: test-dep dataKey: body.emailBody dest: email.body email.to.index can be used to overwrite an email address already specified in the trigger at the provided index (where index is an integer). email.to.-1 can be used to append a new email address to the addresses to which an email will be sent. email.from can be used to specify the from address of the email sent. email.body can be used to specify the body of the email which will be sent. email.subject can be used to specify the subject of the email which will be sent. To understand more on parameterization, take a look at this tutorial . The complete specification of the Email trigger is available here .","title":"Email Trigger"},{"location":"sensors/triggers/email-trigger/#email-trigger","text":"The Email trigger is used to send a custom email to a desired set of email addresses using an SMTP server. The intended use is notifications for a build pipeline, but it can be used for any notification scenario.","title":"Email Trigger"},{"location":"sensors/triggers/email-trigger/#prerequisite","text":"Deploy the eventbus in the namespace. Have an SMTP server set up. Create a Kubernetes secret with the SMTP password in your cluster. kubectl create secret generic smtp-secret --from-literal=password=$SMTP_PASSWORD Note : If your SMTP server does not require authentication, this step can be skipped. Create a webhook event-source. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Set up port-forwarding to expose the http server. We will use port-forwarding here. kubectl port-forward -n argo-events <event-source-pod-name> 12000:12000","title":"Prerequisite"},{"location":"sensors/triggers/email-trigger/#email-trigger_1","text":"Let's say we want to send an email to a dynamic recipient using a custom email body template. The custom email body template we are going to use is the following: Hi {{.name}}, Hello There Thanks, Obi where {{.name}} has to be substituted with the receiver name from the event. Create a sensor with the Email trigger. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/email-trigger.yaml Note : Please update email.port , email.host and email.username to that of your SMTP server. If your SMTP server does not require authentication, the email.username and email.smtpPassword should be omitted. Send an HTTP request to the event-source pod to fire the Email trigger. curl -d '{\"name\":\"Luke\", \"to\":\"your@email.com\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Note : You can modify the value for key \"to\" to send the email to your address. Alternatively, you can skip providing the \"to\" in the payload to send an email to the static email address provided in the trigger. curl -d '{\"name\":\"Luke\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Note : You have to remove the parameterization for email.to.0 and add email.to like so: email : ... 
to : - target1@email.com - target2@email.com ...","title":"Email Trigger"},{"location":"sensors/triggers/email-trigger/#parameterization","text":"We can parameterize the to, from, subject and body of the email trigger for dynamic capabilities. The email trigger parameters have the following structure, - parameters: - src: dependencyName: test-dep dataKey: body.to dest: email.to.0 - src: dependencyName: test-dep dataKey: body.to dest: email.to.-1 - src: dependencyName: test-dep dataKey: body.from dest: email.from - src: dependencyName: test-dep dataKey: body.subject dest: email.subject - src: dependencyName: test-dep dataKey: body.emailBody dest: email.body email.to.index can be used to overwite an email address already specified in the trigger at the provided index. (where index is an integer) email.to.-1 can be used to append a new email address to the addresses to which an email will be sent. email.from can be used to specify the from address of the email sent. email.body can be used to specify the body of the email which will be sent. email.subject can be used to specify the subject of the email which will be sent. To understand more on parameterization, take a look at this tutorial . The complete specification of Email trigger is available here .","title":"Parameterization"},{"location":"sensors/triggers/http-trigger/","text":"HTTP Trigger \u00b6 Argo Events offers HTTP trigger which can easily invoke serverless functions like OpenFaaS, Kubeless, Knative, Nuclio and make REST API calls. Specification \u00b6 The HTTP trigger specification is available here . REST API Calls \u00b6 Consider a scenario where your REST API server needs to consume events from event-sources S3, GitHub, SQS etc. Usually, you'd end up writing the integration yourself in the server code, although server logic has nothing to do any of the event-sources. This is where Argo Events HTTP trigger can help. The HTTP trigger takes the task of consuming events from event-sources away from API server and seamlessly integrates these events via REST API calls. We will set up a basic go http server and connect it with the Minio events. The HTTP server simply prints the request body as follows. package main import ( \"fmt\" \"io\" \"net/http\" ) func hello ( w http . ResponseWriter , req * http . Request ) { body , err := io . ReadAll ( req . Body ) if err != nil { fmt . Printf ( \"%+v \\n \" , err ) return } fmt . Println ( string ( body )) fmt . Fprintf ( w , \"hello \\n \" ) } func main () { http . HandleFunc ( \"/hello\" , hello ) fmt . Println ( \"server is listening on 8090\" ) http . ListenAndServe ( \":8090\" , nil ) } Deploy the HTTP server. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/09-http-trigger/http-server.yaml Create a service to expose the http server. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/09-http-trigger/http-server-svc.yaml Either use Ingress, OpenShift Route or port-forwarding to expose the http server. kubectl -n argo-events port-forward 8090:8090 Our goals is to seamlessly integrate Minio S3 bucket notifications with REST API server created in previous step. So, lets set up the Minio event-source available here . Don't create the sensor as we will be deploying it in next step. Create a sensor as follows. 
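The linked http-trigger.yaml wires a Minio dependency to the HTTP server; it looks roughly like the sketch below (the event-source and dependency names are assumptions based on the Minio setup, so check the linked manifest for the authoritative version), and it is applied with the command that follows:

apiVersion: argoproj.io/v1alpha1
kind: Sensor
metadata:
  name: http-trigger-sensor
spec:
  dependencies:
    - name: test-dep
      eventSourceName: minio
      eventName: example
  triggers:
    - template:
        name: http-trigger
        http:
          url: http://http-server.argo-events.svc:8090/hello
          payload:
            - src:
                dependencyName: test-dep
                dataKey: notification.0.s3.bucket.name
              dest: bucket
            - src:
                dependencyName: test-dep
                contextKey: type
              dest: type
          method: POST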
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/http-trigger.yaml Now, drop a file onto the input bucket in the Minio server. The sensor has triggered an HTTP request to the HTTP server. Take a look at the logs. server is listening on 8090 { \"type\" : \"minio\" , \"bucket\" : \"input\" } Great!!! Request Payload \u00b6 In order to construct a request payload based on the event data, the sensor offers a payload field as part of the HTTP trigger. Let's examine an HTTP trigger, http : url : http : // http - server . argo - events . svc : 8090 / hello payload : - src : dependencyName : test - dep dataKey : notification . 0. s3 . bucket . name dest : bucket - src : dependencyName : test - dep contextKey : type dest : type method : POST // GET , DELETE , POST , PUT , HEAD , etc . The payload contains a list of src which refers to the source event and dest which refers to the destination key within the resulting request payload. The payload declared above will generate a request payload like below, { \"type\" : \"type of event from event's context\" , \"bucket\" : \"bucket name from event data\" } The above payload will be passed in the HTTP request. You can add any number of src and dest pairs under payload . Note : Take a look at Parameterization in order to understand how to extract a particular key-value from the event data. Parameterization \u00b6 Similar to other types of triggers, the sensor offers parameterization for the HTTP trigger. Parameterization is especially useful when you want to define a generic trigger template in the sensor and populate values like the URL and payload on the fly. You can learn more about trigger parameterization here . Policy \u00b6 Trigger policy helps you determine the status of the HTTP request and decide whether to stop or continue the sensor. To determine whether the HTTP request was successful or not, the HTTP trigger provides a Status policy. The Status policy holds a list of response statuses that are considered valid. http : url : http : // http - server . argo - events . svc : 8090 / hello payload : - src : dependencyName : test - dep dataKey : notification . 0. s3 . bucket . name dest : bucket - src : dependencyName : test - dep contextKey : type dest : type method : POST // GET , DELETE , POST , PUT , HEAD , etc . retryStrategy : steps : 3 duration : 3 s policy : status : allow : - 200 - 201 The above HTTP trigger will be treated as successful only if the HTTP request returns with either a 200 or 201 status. OpenFaaS \u00b6 OpenFaaS offers a simple way to spin up serverless functions. Let's see how we can leverage the Argo Events HTTP trigger to invoke an OpenFaaS function. If you don't have OpenFaaS installed, follow the instructions . Let's create a basic function. You can follow the steps to set up the function. package function import ( \"fmt\" ) // Handle a serverless request func Handle ( req [] byte ) string { return fmt . Sprintf ( \"Hello, Go. You said: %s \" , string ( req )) } Make sure the function pod is up and running. We are going to invoke the OpenFaaS function when a message is published on a Redis channel. Let's set up the Redis database and the Redis PubSub event-source as specified here . Do not create the Redis sensor, we are going to create it in the next step. Let's create the sensor with the OpenFaaS trigger. apiVersion : argoproj . 
io / v1alpha1 kind : Sensor metadata : name : redis - sensor spec : dependencies : - name : test - dep eventSourceName : redis eventName : example triggers : - template : name : openfaas - trigger http : url : http : // gateway . openfaas . svc . cluster . local : 8080 / function / gohash payload : - src : dependencyName : test - dep dest : bucket method : POST Publish a message on FOO channel using redis-cli . PUBLISH FOO hello As soon as you publish the message, the sensor will invoke the OpenFaaS function gohash . Kubeless \u00b6 Similar to REST API calls, you can easily invoke Kubeless functions using HTTP trigger. If you don't have Kubeless installed, follow the installation . Lets create a basic function. def hello ( event , context ) : print event return event [ 'data' ] Make sure the function pod and service is created. Now, we are going to invoke the Kubeless function when a message is placed on a NATS queue. Let's set up the NATS event-source. Follow instructions for details. Do not create the NATS sensor, we are going to create it in next step. Let's create NATS sensor with HTTP trigger. apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : nats - sensor spec : dependencies : - name : test - dep eventSourceName : nats eventName : example triggers : - template : name : kubeless - trigger http : serverURL : http : // hello . kubeless . svc . cluster . local : 8080 payload : - src : dependencyName : test - dep dataKey : body . first_name dest : first_name - src : dependencyName : test - dep dataKey : body . last_name dest : last_name method : POST Once event-source and sensor pod are up and running, dispatch a message on foo subject using nats client. go run main . go - s localhost foo '{\"first_name\": \"foo\", \"last_name\": \"bar\"}' It will invoke Kubeless function hello . { ' event - time ' : None , ' extensions ' : { ' request ' : < LocalRequest : POST http : //hello.kubeless.svc.cluster.local:8080/> }, 'event-type': None, 'event-namespace': None, 'data': '{\"first_name\":\"foo\",\"last_name\":\"bar\"}', 'event-id': None} Other serverless frameworks \u00b6 Similar to OpenFaaS and Kubeless invocation demonstrated above, you can easily trigger KNative, Nuclio, Fission functions using HTTP trigger.","title":"HTTP Trigger"},{"location":"sensors/triggers/http-trigger/#http-trigger","text":"Argo Events offers HTTP trigger which can easily invoke serverless functions like OpenFaaS, Kubeless, Knative, Nuclio and make REST API calls.","title":"HTTP Trigger"},{"location":"sensors/triggers/http-trigger/#specification","text":"The HTTP trigger specification is available here .","title":"Specification"},{"location":"sensors/triggers/http-trigger/#rest-api-calls","text":"Consider a scenario where your REST API server needs to consume events from event-sources S3, GitHub, SQS etc. Usually, you'd end up writing the integration yourself in the server code, although server logic has nothing to do any of the event-sources. This is where Argo Events HTTP trigger can help. The HTTP trigger takes the task of consuming events from event-sources away from API server and seamlessly integrates these events via REST API calls. We will set up a basic go http server and connect it with the Minio events. The HTTP server simply prints the request body as follows. package main import ( \"fmt\" \"io\" \"net/http\" ) func hello ( w http . ResponseWriter , req * http . Request ) { body , err := io . ReadAll ( req . Body ) if err != nil { fmt . Printf ( \"%+v \\n \" , err ) return } fmt . 
Println ( string ( body )) fmt . Fprintf ( w , \"hello \\n \" ) } func main () { http . HandleFunc ( \"/hello\" , hello ) fmt . Println ( \"server is listening on 8090\" ) http . ListenAndServe ( \":8090\" , nil ) } Deploy the HTTP server. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/09-http-trigger/http-server.yaml Create a service to expose the http server. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/09-http-trigger/http-server-svc.yaml Either use Ingress, OpenShift Route or port-forwarding to expose the http server. kubectl -n argo-events port-forward 8090:8090 Our goals is to seamlessly integrate Minio S3 bucket notifications with REST API server created in previous step. So, lets set up the Minio event-source available here . Don't create the sensor as we will be deploying it in next step. Create a sensor as follows. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/http-trigger.yaml Now, drop a file onto input bucket in Minio server. The sensor has triggered a http request to the http server. Take a look at the logs. server is listening on 8090 { \"type\" : \"minio\" , \"bucket\" : \"input\" } Great!!!","title":"REST API Calls"},{"location":"sensors/triggers/http-trigger/#request-payload","text":"In order to construct a request payload based on the event data, sensor offers payload field as a part of the HTTP trigger. Let's examine a HTTP trigger, http : url : http : // http - server . argo - events . svc : 8090 / hello payload : - src : dependencyName : test - dep dataKey : notification . 0. s3 . bucket . name dest : bucket - src : dependencyName : test - dep contextKey : type dest : type method : POST // GET , DELETE , POST , PUT , HEAD , etc . The payload contains the list of src which refers to the source event and dest which refers to destination key within result request payload. The payload declared above will generate a request payload like below, { \"type\" : \"type of event from event's context\" \"bucket\" : \"bucket name from event data\" } The above payload will be passed in the HTTP request. You can add however many number of src and dest under payload . Note : Take a look at Parameterization in order to understand how to extract particular key-value from event data.","title":"Request Payload"},{"location":"sensors/triggers/http-trigger/#parameterization","text":"Similar to other type of triggers, sensor offers parameterization for the HTTP trigger. Parameterization is specially useful when you want to define a generic trigger template in the sensor and populate values like URL, payload values on the fly. You can learn more about trigger parameterization here .","title":"Parameterization"},{"location":"sensors/triggers/http-trigger/#policy","text":"Trigger policy helps you determine the status of the HTTP request and decide whether to stop or continue sensor. To determine whether the HTTP request was successful or not, the HTTP trigger provides a Status policy. The Status holds a list of response statuses that are considered valid. http : url : http : // http - server . argo - events . svc : 8090 / hello payload : - src : dependencyName : test - dep dataKey : notification . 0 s3 . bucket . name dest : bucket - src : dependencyName : test - dep contextKey : type dest : type method : POST // GET , DELETE , POST , PUT , HEAD , etc . 
retryStrategy : steps : 3 duration : 3 s policy : status : allow : - 200 - 201 The above HTTP trigger will be treated successful only if the HTTP request returns with either 200 or 201 status.","title":"Policy"},{"location":"sensors/triggers/http-trigger/#openfaas","text":"OpenFaaS offers a simple way to spin up serverless functions. Lets see how we can leverage Argo Events HTTP trigger to invoke OpenFaaS function. If you don't have OpenFaaS installed, follow the instructions . Let's create a basic function. You can follow the steps . to set up the function. package function import ( \"fmt\" ) // Handle a serverless request func Handle ( req [] byte ) string { return fmt . Sprintf ( \"Hello, Go. You said: %s \" , string ( req )) } Make sure the function pod is up and running. We are going to invoke OpenFaaS function on a message on Redis Subscriber. Let's set up the Redis Database, Redis PubSub event-source as specified here . Do not create the Redis sensor, we are going to create it in next step. Let's create the sensor with OpenFaaS trigger. apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : redis - sensor spec : dependencies : - name : test - dep eventSourceName : redis eventName : example triggers : - template : name : openfaas - trigger http : url : http : // gateway . openfaas . svc . cluster . local : 8080 / function / gohash payload : - src : dependencyName : test - dep dest : bucket method : POST Publish a message on FOO channel using redis-cli . PUBLISH FOO hello As soon as you publish the message, the sensor will invoke the OpenFaaS function gohash .","title":"OpenFaaS"},{"location":"sensors/triggers/http-trigger/#kubeless","text":"Similar to REST API calls, you can easily invoke Kubeless functions using HTTP trigger. If you don't have Kubeless installed, follow the installation . Lets create a basic function. def hello ( event , context ) : print event return event [ 'data' ] Make sure the function pod and service is created. Now, we are going to invoke the Kubeless function when a message is placed on a NATS queue. Let's set up the NATS event-source. Follow instructions for details. Do not create the NATS sensor, we are going to create it in next step. Let's create NATS sensor with HTTP trigger. apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : nats - sensor spec : dependencies : - name : test - dep eventSourceName : nats eventName : example triggers : - template : name : kubeless - trigger http : serverURL : http : // hello . kubeless . svc . cluster . local : 8080 payload : - src : dependencyName : test - dep dataKey : body . first_name dest : first_name - src : dependencyName : test - dep dataKey : body . last_name dest : last_name method : POST Once event-source and sensor pod are up and running, dispatch a message on foo subject using nats client. go run main . go - s localhost foo '{\"first_name\": \"foo\", \"last_name\": \"bar\"}' It will invoke Kubeless function hello . 
","title":"Kubeless"},{"location":"sensors/triggers/http-trigger/#other-serverless-frameworks","text":"Similar to the OpenFaaS and Kubeless invocations demonstrated above, you can easily trigger KNative, Nuclio, and Fission functions using the HTTP trigger.","title":"Other serverless frameworks"},{"location":"sensors/triggers/k8s-object-trigger/","text":"Kubernetes Object Trigger \u00b6 Apart from Argo workflow objects, the sensor lets you trigger any Kubernetes object, including Custom Resources, as well as standard objects such as Pod, Deployment, Job, CronJob, etc. Having the ability to trigger Kubernetes objects is quite powerful, as it provides an avenue to set up event-driven pipelines for existing workloads. Trigger a K8s Pod \u00b6 We will use the webhook event-source and sensor to trigger a K8s pod. Let's set up a webhook event source to process incoming requests. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml To trigger a pod, we need to create a sensor as defined below. apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : webhook spec : template : serviceAccountName : create - pod - sa # A service account has privileges to create a Pod dependencies : - name : test - dep eventSourceName : webhook eventName : example triggers : - template : name : webhook - pod - trigger k8s : operation : create source : resource : apiVersion : v1 kind : Pod metadata : generateName : hello - world - spec : containers : - name : hello - container args : - \"hello-world\" command : - cowsay image : \"docker/whalesay:latest\" parameters : - src : dependencyName : test - dep dest : spec . containers . 0 . args . 0 Create the sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/trigger-standard-k8s-resource.yaml Let's expose the webhook event-source pod using port-forward so that we can make a request to it. kubectl -n argo-events port-forward <event-source-pod-name> 12000:12000 Use either Curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example After the pod has completed, inspect the logs of the pod; you will see something similar to the output below. _________________________________________ / { \"context\" :{ \"type\" : \"webhook\" , \"specVersi \\ | on\" : \"0.3\" , \"source\" : \"webhook\" , \"e | | ventID\" : \"30306463666539362d346666642d34 | | 3336332d383861312d336538363333613564313 | | 932\" , \"time\" : \"2020-01-11T21:23:07.682961 | | Z\" , \"dataContentType\" : \"application/json\" | | , \"subject\" : \"example\" }, \"data\" : \"eyJoZWFkZ | | XIiOnsiQWNjZXB0IjpbIiovKiJdLCJDb250ZW50 | | LUxlbmd0aCI6WyIxOSJdLCJDb250ZW50LVR5cGU | | iOlsiYXBwbGljYXRpb24vanNvbiJdLCJVc2VyLU | | FnZW50IjpbImN1cmwvNy41NC4wIl19LCJib2R5I | \\ jp7Im1lc3NhZ2UiOiJoZXkhISJ9fQ==\" } / ----------------------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## == = / \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" ___ / == = ~~~ { ~~ ~~~~ ~~~ ~~~~ ~~ ~ / == = - ~~~ \\ ______ o __ / \\ \\ __ / \\ ____ \\ ______ / Operation \u00b6 You can specify the operation for the trigger using the operation key under triggers->template->k8s. Operation can be one of the following. create : Creates the object if it is not available in the K8s cluster. update : Updates the object. patch : Patches the object using the given patch strategy. delete : Deletes the object if it exists. More info is available here .
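Roughly speaking, these operations map onto the standard Kubernetes API verbs. As an illustration of what operation: create amounts to (this is not the sensor's implementation, just the equivalent client-go call for the Pod above; the namespace is an assumption):

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// In-cluster config; the service account must be allowed to
	// create Pods (compare create-pod-sa in the sensor above).
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "hello-world-"},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:    "hello-container",
				Image:   "docker/whalesay:latest",
				Command: []string{"cowsay"},
				Args:    []string{"hello-world"}, // the trigger parameter overwrites this from the event
			}},
		},
	}
	// operation: create boils down to a Create call against the API server.
	if _, err := client.CoreV1().Pods("argo-events").Create(context.Background(), pod, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
}
```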
Parameterization \u00b6 As with other types of triggers, the sensor offers parameterization for the K8s trigger. Parameterization is especially useful when you want to define a generic trigger template in the sensor and populate the K8s object values on the fly. You can learn more about trigger parameterization here . Policy \u00b6 Trigger policy helps you determine the status of the triggered K8s object and decide whether to stop or continue the sensor. To determine whether the K8s object was successful or not, the K8s trigger provides a Resource Labels policy. The Resource Labels policy holds a list of labels which are checked against the triggered K8s object to determine the status of the object. # Policy to configure backoff and execution criteria for the trigger # Because the sensor is able to trigger any K8s resource , it determines the resource state by looking at the resource 's labels. policy: k8s: # Backoff before checking the resource labels backoff: # Duration is the duration in nanoseconds duration: 1000000000 # 1 second # Duration is multiplied by factor each iteration factor: 2 # The amount of jitter applied each iteration jitter: 0.1 # Exit with error after this many steps steps: 5 # labels set on the resource decide if the resource has transitioned into the success state. labels: workflows.argoproj.io/phase: Succeeded # Determines whether trigger should be marked as failed if the backoff times out and sensor is still unable to decide the state of the trigger. # defaults to false errorOnBackoffTimeout: true A complete example is available here .","title":"Kubernetes Object Trigger"},{"location":"sensors/triggers/k8s-object-trigger/#kubernetes-object-trigger","text":"Apart from Argo workflow objects, the sensor lets you trigger any Kubernetes object, including Custom Resources, as well as standard objects such as Pod, Deployment, Job, CronJob, etc. Having the ability to trigger Kubernetes objects is quite powerful, as it provides an avenue to set up event-driven pipelines for existing workloads.","title":"Kubernetes Object Trigger"},{"location":"sensors/triggers/k8s-object-trigger/#trigger-a-k8s-pod","text":"We will use the webhook event-source and sensor to trigger a K8s pod. Let's set up a webhook event source to process incoming requests. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml To trigger a pod, we need to create a sensor as defined below. apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : webhook spec : template : serviceAccountName : create - pod - sa # A service account has privileges to create a Pod dependencies : - name : test - dep eventSourceName : webhook eventName : example triggers : - template : name : webhook - pod - trigger k8s : operation : create source : resource : apiVersion : v1 kind : Pod metadata : generateName : hello - world - spec : containers : - name : hello - container args : - \"hello-world\" command : - cowsay image : \"docker/whalesay:latest\" parameters : - src : dependencyName : test - dep dest : spec . containers . 0 . args . 0 Create the sensor using the kubectl command shown after the sketch below.
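First, a quick aside on the Policy section above: with duration 1s, factor 2, jitter 0.1 and steps 5, the label checks happen after roughly 1s, 2s, 4s, 8s and 16s. A small sketch of that arithmetic (illustrative only; this is not Argo Events' code):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// Values from the policy above.
	duration := 1 * time.Second // duration: 1000000000 (nanoseconds)
	factor := 2.0               // factor: 2
	jitter := 0.1               // jitter: 0.1
	steps := 5                  // steps: 5

	wait := duration
	for i := 0; i < steps; i++ {
		// Each wait is the previous duration multiplied by factor,
		// nudged by up to +/- jitter.
		j := 1 + jitter*(2*rand.Float64()-1)
		fmt.Printf("check %d after ~%v\n", i+1, time.Duration(float64(wait)*j))
		wait = time.Duration(float64(wait) * factor)
	}
	// If the success labels are still absent after the last step,
	// errorOnBackoffTimeout decides whether the trigger is marked failed.
}
```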
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/trigger-standard-k8s-resource.yaml Let's expose the webhook event-source pod using port-forward so that we can make a request to it. kubectl -n argo-events port-forward <event-source-pod-name> 12000:12000 Use either Curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example After the pod has completed, inspect the logs of the pod; you will see something similar to the output below. _________________________________________ / { \"context\" :{ \"type\" : \"webhook\" , \"specVersi \\ | on\" : \"0.3\" , \"source\" : \"webhook\" , \"e | | ventID\" : \"30306463666539362d346666642d34 | | 3336332d383861312d336538363333613564313 | | 932\" , \"time\" : \"2020-01-11T21:23:07.682961 | | Z\" , \"dataContentType\" : \"application/json\" | | , \"subject\" : \"example\" }, \"data\" : \"eyJoZWFkZ | | XIiOnsiQWNjZXB0IjpbIiovKiJdLCJDb250ZW50 | | LUxlbmd0aCI6WyIxOSJdLCJDb250ZW50LVR5cGU | | iOlsiYXBwbGljYXRpb24vanNvbiJdLCJVc2VyLU | | FnZW50IjpbImN1cmwvNy41NC4wIl19LCJib2R5I | \\ jp7Im1lc3NhZ2UiOiJoZXkhISJ9fQ==\" } / ----------------------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## == = / \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" ___ / == = ~~~ { ~~ ~~~~ ~~~ ~~~~ ~~ ~ / == = - ~~~ \\ ______ o __ / \\ \\ __ / \\ ____ \\ ______ /","title":"Trigger a K8s Pod"},{"location":"sensors/triggers/k8s-object-trigger/#operation","text":"You can specify the operation for the trigger using the operation key under triggers->template->k8s. Operation can be one of the following. create : Creates the object if it is not available in the K8s cluster. update : Updates the object. patch : Patches the object using the given patch strategy. delete : Deletes the object if it exists. More info is available here .","title":"Operation"},{"location":"sensors/triggers/k8s-object-trigger/#parameterization","text":"As with other types of triggers, the sensor offers parameterization for the K8s trigger. Parameterization is especially useful when you want to define a generic trigger template in the sensor and populate the K8s object values on the fly. You can learn more about trigger parameterization here .","title":"Parameterization"},{"location":"sensors/triggers/k8s-object-trigger/#policy","text":"Trigger policy helps you determine the status of the triggered K8s object and decide whether to stop or continue the sensor. To determine whether the K8s object was successful or not, the K8s trigger provides a Resource Labels policy. The Resource Labels policy holds a list of labels which are checked against the triggered K8s object to determine the status of the object. # Policy to configure backoff and execution criteria for the trigger # Because the sensor is able to trigger any K8s resource , it determines the resource state by looking at the resource 's labels. policy: k8s: # Backoff before checking the resource labels backoff: # Duration is the duration in nanoseconds duration: 1000000000 # 1 second # Duration is multiplied by factor each iteration factor: 2 # The amount of jitter applied each iteration jitter: 0.1 # Exit with error after this many steps steps: 5 # labels set on the resource decide if the resource has transitioned into the success state. labels: workflows.argoproj.io/phase: Succeeded # Determines whether trigger should be marked as failed if the backoff times out and sensor is still unable to decide the state of the trigger.
# defaults to false errorOnBackoffTimeout: true A complete example is available here .","title":"Policy"},{"location":"sensors/triggers/kafka-trigger/","text":"Kafka Trigger \u00b6 The Kafka trigger allows a sensor to publish events on a Kafka topic. This trigger helps source events from the outside world into your messaging queues. Specification \u00b6 The Kafka trigger specification is available here . Walkthrough \u00b6 Consider a scenario where you are expecting a file drop onto a Minio bucket and want to place that event on a Kafka topic. Set up the Minio Event Source here . Do not create the Minio sensor, we are going to create it in the next step. Let's create the sensor. apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : minio - sensor spec : dependencies : - name : test - dep eventSourceName : minio eventName : example triggers : - template : name : kafka - trigger kafka : # Kafka URL url : kafka . argo - events . svc : 9092 # Name of the topic topic : minio - events # partition id partition : 0 payload : - src : dependencyName : test - dep dataKey : notification . 0. s3 . object . key dest : fileName - src : dependencyName : test - dep dataKey : notification . 0. s3 . bucket . name dest : bucket The Kafka message needs a body. In order to construct a message based on the event data, the sensor offers a payload field as part of the Kafka trigger. The payload contains a list of src , which refers to the source event, and dest , which refers to the destination key within the resulting message payload. The payload declared above will generate a message body like below.
{ \"fileName\": \"hello.txt\" // name/key of the object \"bucket\": \"input\" // name of the bucket } Drop a file called hello.txt onto the bucket input and you will receive the message on Kafka topic","title":"Walkthrough"},{"location":"sensors/triggers/log/","text":"Log \u00b6 Log trigger is for debugging - it just logs events it receives as JSON: { \"level\" : \"info\" , \"ts\" : 1604783266.973979 , \"logger\" : \"argo-events.sensor\" , \"caller\" : \"log/log.go:35\" , \"msg\" : \"{\\\"eventTime\\\":\\\"2020-11-07 21:07:46.9658533 +0000 UTC m=+20468.986115001\\\"}\" , \"sensorName\" : \"log\" , \"triggerName\" : \"log-trigger\" , \"dependencyName\" : \"test-dep\" , \"eventContext\" : \"{\\\"id\\\":\\\"37363664356662642d616364322d343563332d396362622d353037653361343637393237\\\",\\\"source\\\":\\\"calendar\\\",\\\"specversion\\\":\\\"1.0\\\",\\\"type\\\":\\\"calendar\\\",\\\"datacontenttype\\\":\\\"application/json\\\",\\\"subject\\\":\\\"example-with-interval\\\",\\\"time\\\":\\\"2020-11-07T21:07:46Z\\\"}\" } Specification \u00b6 The specification is available here . Parameterization \u00b6 No parameterization is supported.","title":"Log"},{"location":"sensors/triggers/log/#log","text":"Log trigger is for debugging - it just logs events it receives as JSON: { \"level\" : \"info\" , \"ts\" : 1604783266.973979 , \"logger\" : \"argo-events.sensor\" , \"caller\" : \"log/log.go:35\" , \"msg\" : \"{\\\"eventTime\\\":\\\"2020-11-07 21:07:46.9658533 +0000 UTC m=+20468.986115001\\\"}\" , \"sensorName\" : \"log\" , \"triggerName\" : \"log-trigger\" , \"dependencyName\" : \"test-dep\" , \"eventContext\" : \"{\\\"id\\\":\\\"37363664356662642d616364322d343563332d396362622d353037653361343637393237\\\",\\\"source\\\":\\\"calendar\\\",\\\"specversion\\\":\\\"1.0\\\",\\\"type\\\":\\\"calendar\\\",\\\"datacontenttype\\\":\\\"application/json\\\",\\\"subject\\\":\\\"example-with-interval\\\",\\\"time\\\":\\\"2020-11-07T21:07:46Z\\\"}\" }","title":"Log"},{"location":"sensors/triggers/log/#specification","text":"The specification is available here .","title":"Specification"},{"location":"sensors/triggers/log/#parameterization","text":"No parameterization is supported.","title":"Parameterization"},{"location":"sensors/triggers/nats-trigger/","text":"NATS Trigger \u00b6 NATS trigger allows sensor to publish events on NATS subjects. This trigger helps source the events from outside world into your messaging queues. Specification \u00b6 The NATS trigger specification is available here . Walkthrough \u00b6 Consider a scenario where you are expecting a file drop onto a Minio bucket and want to place that event on a NATS subject. Set up the Minio Event Source here . Do not create the Minio sensor, we are going to create it in next step. Lets create the sensor. apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : minio - sensor spec : dependencies : - name : test - dep eventSourceName : minio eventName : example triggers : - template : name : nats - trigger nats : # NATS Server URL url : nats . argo - events . svc : 4222 # Name of the subject subject : minio - events payload : - src : dependencyName : test - dep dataKey : notification . 0. s3 . object . key dest : fileName - src : dependencyName : test - dep dataKey : notification . 0. s3 . bucket . name dest : bucket The NATS message needs a body. In order to construct message based on the event data, sensor offers payload field as a part of the NATS trigger. 
The payload contains a list of src , which refers to the source event, and dest , which refers to the destination key within the resulting message payload. The payload declared above will generate a message body like below, { \"fileName\": \"hello.txt\" // name/key of the object \"bucket\": \"input\" // name of the bucket } If you are running NATS on a local K8s cluster, make sure to port-forward to the pod. kubectl -n argo-events port-forward <nats-pod-name> 4222:4222 Subscribe to the subject called minio-events . Refer to the nats example to publish a message to the subject https://github.com/nats-io/go-nats-examples/tree/master/patterns/publish-subscribe . go run main . go - s localhost minio - events Drop a file called hello.txt onto the bucket input and you will receive the message on the NATS subscriber as follows. [#1] Received on [minio-events]: '{\"bucket\":\"input\",\"fileName\":\"hello.txt\"}'","title":"NATS Trigger"},{"location":"sensors/triggers/nats-trigger/#nats-trigger","text":"The NATS trigger allows a sensor to publish events on NATS subjects. This trigger helps source events from the outside world into your messaging queues.","title":"NATS Trigger"},{"location":"sensors/triggers/nats-trigger/#specification","text":"The NATS trigger specification is available here .","title":"Specification"},{"location":"sensors/triggers/nats-trigger/#walkthrough","text":"Consider a scenario where you are expecting a file drop onto a Minio bucket and want to place that event on a NATS subject. Set up the Minio Event Source here . Do not create the Minio sensor, we are going to create it in the next step. Let's create the sensor. apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : minio - sensor spec : dependencies : - name : test - dep eventSourceName : minio eventName : example triggers : - template : name : nats - trigger nats : # NATS Server URL url : nats . argo - events . svc : 4222 # Name of the subject subject : minio - events payload : - src : dependencyName : test - dep dataKey : notification . 0. s3 . object . key dest : fileName - src : dependencyName : test - dep dataKey : notification . 0. s3 . bucket . name dest : bucket The NATS message needs a body. In order to construct a message based on the event data, the sensor offers a payload field as part of the NATS trigger. The payload contains a list of src , which refers to the source event, and dest , which refers to the destination key within the resulting message payload. The payload declared above will generate a message body like below, { \"fileName\": \"hello.txt\" // name/key of the object \"bucket\": \"input\" // name of the bucket } If you are running NATS on a local K8s cluster, make sure to port-forward to the pod. kubectl -n argo-events port-forward <nats-pod-name> 4222:4222 Subscribe to the subject called minio-events . Refer to the nats example to publish a message to the subject https://github.com/nats-io/go-nats-examples/tree/master/patterns/publish-subscribe . go run main . go - s localhost minio - events Drop a file called hello.txt onto the bucket input and you will receive the message on the NATS subscriber as follows. [#1] Received on [minio-events]: '{\"bucket\":\"input\",\"fileName\":\"hello.txt\"}'","title":"Walkthrough"},{"location":"sensors/triggers/openwhisk-trigger/","text":"OpenWhisk Trigger \u00b6 OpenWhisk is a framework to run serverless workloads. It ships with its own event sources, but their number is limited, and it doesn't have support for circuits, parameterization, filtering, on-demand payload construction, etc. that a sensor provides.
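The Setup section below is still marked Coming Soon. In the meantime, here is a rough sketch of invoking an OpenWhisk action over its documented REST endpoint, which is essentially what an HTTP-style trigger would do; the API host, credentials, and action name are all illustrative assumptions:

```go
package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Illustrative values; take APIHOST and AUTH from `wsk property get`.
	apiHost := "https://openwhisk.example.com"
	auth := base64.StdEncoding.EncodeToString([]byte("user:password"))
	url := apiHost + "/api/v1/namespaces/_/actions/hello?blocking=true&result=true"

	req, err := http.NewRequest("POST", url, bytes.NewBufferString(`{"name":"argo"}`))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Basic "+auth)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body)) // the action's result, when blocking
}
```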
Prerequisite \u00b6 OpenWhisk must be up and running. Setup \u00b6 Coming Soon...","title":"OpenWhisk Trigger"},{"location":"sensors/triggers/openwhisk-trigger/#openwhisk-trigger","text":"OpenWhisk is a framework to run serverless workloads. It ships with its own event sources, but their number is limited, and it doesn't have support for circuits, parameterization, filtering, on-demand payload construction, etc. that a sensor provides.","title":"OpenWhisk Trigger"},{"location":"sensors/triggers/openwhisk-trigger/#prerequisite","text":"OpenWhisk must be up and running.","title":"Prerequisite"},{"location":"sensors/triggers/openwhisk-trigger/#setup","text":"Coming Soon...","title":"Setup"},{"location":"sensors/triggers/pulsar-trigger/","text":"Pulsar Trigger \u00b6 The Pulsar trigger allows a sensor to publish events on a Pulsar topic. This trigger helps source events from the outside world into your messaging queues. Specification \u00b6 The Pulsar trigger specification is available here . Walkthrough \u00b6 Consider a scenario where you are expecting a file drop onto a Minio bucket and want to place that event on a Pulsar topic. Set up the Minio Event Source here . Do not create the Minio sensor, we are going to create it in the next step. Let's create the sensor. apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : minio - sensor spec : dependencies : - name : test - dep eventSourceName : minio eventName : example triggers : - template : name : pulsar - trigger pulsar : # Pulsar URL url : pulsar : // pulsar . argo - events . svc : 6650 # Name of the topic topic : minio - events payload : - src : dependencyName : test - dep dataKey : notification . 0. s3 . object . key dest : fileName - src : dependencyName : test - dep dataKey : notification . 0. s3 . bucket . name dest : bucket The Pulsar message needs a body. In order to construct a message based on the event data, the sensor offers a payload field as part of the Pulsar trigger. The payload contains a list of src , which refers to the source event, and dest , which refers to the destination key within the resulting message payload. The payload declared above will generate a message body like below. { \"fileName\": \"hello.txt\" // name/key of the object \"bucket\": \"input\" // name of the bucket } Drop a file called hello.txt onto the bucket input and you will receive the message on the Pulsar topic.
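To watch the message arrive, you can consume the topic with the Go Pulsar client. A minimal sketch, assuming port-forwarded access at localhost:6650; the subscription name is illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/apache/pulsar-client-go/pulsar"
)

func main() {
	client, err := pulsar.NewClient(pulsar.ClientOptions{URL: "pulsar://localhost:6650"})
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Subscribe to the topic the sensor publishes to.
	consumer, err := client.Subscribe(pulsar.ConsumerOptions{
		Topic:            "minio-events",
		SubscriptionName: "debug-sub", // illustrative name
	})
	if err != nil {
		panic(err)
	}
	defer consumer.Close()

	msg, err := consumer.Receive(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(string(msg.Payload())) // e.g. {"fileName":"hello.txt","bucket":"input"}
	consumer.Ack(msg)
}
```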
","title":"Walkthrough"},{"location":"sensors/triggers/slack-trigger/","text":"Slack Trigger \u00b6 The Slack trigger is used to send a custom message to a desired Slack channel in a Slack workspace. The intended use is notifications for a build pipeline, but it can be used for any notification scenario. Prerequisite \u00b6 Deploy the eventbus in the namespace. Make sure you have a Slack workspace set up that you wish to send a message to. Create a webhook event-source. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Set up port-forwarding to expose the HTTP server. We will use port-forwarding here. kubectl port-forward -n argo-events <event-source-pod-name> 12000:12000 Create a Slack App \u00b6 We need to create a Slack App which will send messages to your Slack Workspace. We will add OAuth Permissions and add the OAuth token to the k8s cluster via a secret. Create a Slack app by clicking Create New App at the Slack API Page . Name your app and choose your intended Slack Workspace. Navigate to your app, then to Features > OAuth & Permissions . Scroll down to Scopes and add the scopes channels:join , channels:read , groups:read and chat:write to the Bot Token Scopes . Scroll to the top of the OAuth & Permissions page and click Install App to Workspace and follow the install Wizard. You should land back on the OAuth & Permissions page. Copy your app's OAuth Access Token. This will allow the trigger to act on behalf of your newly created Slack app. Create a Kubernetes secret with the OAuth token in your cluster. kubectl create secret generic slack-secret --from-literal=token=$SLACK_OAUTH_TOKEN Slack Trigger \u00b6 We will set up a basic Slack trigger and send a default message, and then a dynamic custom message. Create a sensor with the Slack trigger. We will discuss the trigger details in the following sections. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/slack-trigger.yaml Send an HTTP request to the event-source pod to fire the Slack trigger. curl -d '{\"text\":\"Hello, World!\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Note : The default slack-trigger will send the message \"hello world\" to the #general channel. You may change the default message and channel in slack-trigger.yaml under triggers.slack.channel and triggers.slack.message. Alternatively, you can dynamically determine the channel and message based on parameterization of your event. curl - d '{\"channel\":\"random\",\"message\":\"test message\"}' - H \"Content-Type: application/json\" - X POST http : // localhost : 12000 / example Great! But how did the sensor use the event to customize the message and channel from the HTTP request? We will see that in the next section.
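Before that, a brief aside: under the hood the trigger posts the message with Slack's chat.PostMessage API using the bot token from slack-secret. A minimal Go sketch of the equivalent call using the slack-go client (the token and channel values here are illustrative):

```go
package main

import (
	"fmt"

	"github.com/slack-go/slack"
)

func main() {
	// Illustrative token; the sensor reads the real one from slack-secret.
	api := slack.New("xoxb-your-bot-token")

	// Equivalent of the trigger sending "test message" to #random.
	channelID, timestamp, err := api.PostMessage(
		"random",
		slack.MsgOptionText("test message", false),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println("sent to", channelID, "at", timestamp)
}
```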
Parameterization \u00b6 The slack trigger parameters have the following structure, parameters: - src: dependencyName: test-dep dataKey: body.channel dest: slack.channel - src: dependencyName: test-dep contextKey: body.message dest: slack.message The src is the source of event. It contains, dependencyName : name of the event dependency to extract the event from. dataKey : to extract a particular key-value from event's data. contextKey : to extract a particular key-value from event' context. The dest is the destination key within the result payload. So, the above trigger parameters will generate a request payload as, { \"channel\": \"channel_to_send_message\", \"message\": \"message_to_send_to_channel\" } Note : If you define both the contextKey and dataKey within a parameter item, then the dataKey takes the precedence. You can create any parameter structure you want. To get more info on how to generate complex event payloads, take a look at this library . Other Capabilities \u00b6 Configuring the sender of the Slack message: \u00b6 - template: name: slack-trigger slack: sender: username: \"Cool Robot\" icon: \":robot_face:\" # emoji or url, e.g. https://example.com/image.png Sending messages to Slack threads: \u00b6 - template: name: slack - trigger slack: thread: messageAggregationKey: \"abcdefg\" # aggregate message by some key to send them to the same Slack thread broadcastMessageToChannel: true # also broadcast the message from the thread to the channel Sending attachments using Slack Attachments API : \u00b6 - template: name: slack-trigger slack: message: \"hello world!\" attachments: | [{ \"title\": \"Attachment1!\", \"title_link\": \"https://argoproj.github.io/argo-events/sensors/triggers/slack-trigger/\", \"color\": \"#18be52\", \"fields\": [{ \"title\": \"Hello1\", \"value\": \"Hello World1\", \"short\": true }, { \"title\": \"Hello2\", \"value\": \"Hello World2\", \"short\": true }] }, { \"title\": \"Attachment2!\", \"title_link\": \"https://argoproj.github.io/argo-events/sensors/triggers/slack-trigger/\", \"color\": \"#18be52\", \"fields\": [{ \"title\": \"Hello1\", \"value\": \"Hello World1\", \"short\": true }, { \"title\": \"Hello2\", \"value\": \"Hello World2\", \"short\": true }] }] Sending blocks using Slack Blocks API : \u00b6 - template : name : slack - trigger slack : blocks : | [{ \"type\" : \"actions\" , \"block_id\" : \"actionblock789\" , \"elements\" : [{ \"type\" : \"datepicker\" , \"action_id\" : \"datepicker123\" , \"initial_date\" : \"1990-04-28\" , \"placeholder\" : { \"type\" : \"plain_text\" , \"text\" : \"Select a date\" } }, { \"type\" : \"overflow\" , \"options\" : [{ \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-0\" }, { \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-1\" }, { \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-2\" }, { \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-3\" }, { \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-4\" } ], \"action_id\" : \"overflow\" }, { \"type\" : \"button\" , \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"Click Me\" }, \"value\" : \"click_me_123\" , \"action_id\" : \"button\" } ] }] The complete specification of Slack trigger is available here .","title":"Slack 
Trigger"},{"location":"sensors/triggers/slack-trigger/#slack-trigger","text":"The Slack trigger is used to send a custom message to a desired Slack channel in a Slack workspace. The intended use is for notifications for a build pipeline, but can be used for any notification scenario.","title":"Slack Trigger"},{"location":"sensors/triggers/slack-trigger/#prerequisite","text":"Deploy the eventbus in the namespace. Make sure to have a Slack workspace setup you wish to send a message to. Create a webhook event-source. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Set up port-forwarding to expose the http server. We will use port-forwarding here. kubectl port-forward -n argo-events 12000:12000","title":"Prerequisite"},{"location":"sensors/triggers/slack-trigger/#create-a-slack-app","text":"We need to create a Slack App which will send messages to your Slack Workspace. We will add OAuth Permissions and add the OAuth token to the k8s cluster via a secret. Create a Slack app by clicking Create New App at the Slack API Page . Name your app and choose your intended Slack Workspace. Navigate to your app, then to Features > OAuth & Permissions . Scroll down to Scopes and add the scopes channels:join , channels:read , groups:read and chat:write to the Bot Token Scopes . Scroll to the top of the OAuth & Permissions page and click Install App to Workspace and follow the install Wizard. You should land back on the OAuth & Permissions page. Copy your app's OAuth Access Token. This will allow the trigger to act on behalf of your newly created Slack app. Create a kubernetes secret with the OAuth token in your cluster. kubectl create secret generic slack-secret --from-literal=token=$SLACK_OAUTH_TOKEN","title":"Create a Slack App"},{"location":"sensors/triggers/slack-trigger/#slack-trigger_1","text":"We will set up a basic slack trigger and send a default message, and then a dynamic custom message. Create a sensor with Slack trigger. We will discuss the trigger details in the following sections. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/slack-trigger.yaml Send a http request to the event-source-pod to fire the Slack trigger. curl -d '{\"text\":\"Hello, World!\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Note : The default slack-trigger will send the message \"hello world\" to the #general channel. You may change the default message and channel in slack-trigger.yaml under triggers.slack.channel and triggers.slack.message. Alternatively, you can dynamically determine the channel and message based on parameterization of your event. curl - d '{\"channel\":\"random\",\"message\":\"test message\"}' - H \"Content-Type: application/json\" - X POST http : // localhost : 12000 / example Great! But, how did the sensor use the event to customize the message and channel from the http request? We will see that in next section.","title":"Slack Trigger"},{"location":"sensors/triggers/slack-trigger/#parameterization","text":"The slack trigger parameters have the following structure, parameters: - src: dependencyName: test-dep dataKey: body.channel dest: slack.channel - src: dependencyName: test-dep contextKey: body.message dest: slack.message The src is the source of event. It contains, dependencyName : name of the event dependency to extract the event from. dataKey : to extract a particular key-value from event's data. 
contextKey : to extract a particular key-value from event' context. The dest is the destination key within the result payload. So, the above trigger parameters will generate a request payload as, { \"channel\": \"channel_to_send_message\", \"message\": \"message_to_send_to_channel\" } Note : If you define both the contextKey and dataKey within a parameter item, then the dataKey takes the precedence. You can create any parameter structure you want. To get more info on how to generate complex event payloads, take a look at this library .","title":"Parameterization"},{"location":"sensors/triggers/slack-trigger/#other-capabilities","text":"","title":"Other Capabilities"},{"location":"sensors/triggers/slack-trigger/#configuring-the-sender-of-the-slack-message","text":"- template: name: slack-trigger slack: sender: username: \"Cool Robot\" icon: \":robot_face:\" # emoji or url, e.g. https://example.com/image.png","title":"Configuring the sender of the Slack message:"},{"location":"sensors/triggers/slack-trigger/#sending-messages-to-slack-threads","text":"- template: name: slack - trigger slack: thread: messageAggregationKey: \"abcdefg\" # aggregate message by some key to send them to the same Slack thread broadcastMessageToChannel: true # also broadcast the message from the thread to the channel","title":"Sending messages to Slack threads:"},{"location":"sensors/triggers/slack-trigger/#sending-attachments-using-slack-attachments-api","text":"- template: name: slack-trigger slack: message: \"hello world!\" attachments: | [{ \"title\": \"Attachment1!\", \"title_link\": \"https://argoproj.github.io/argo-events/sensors/triggers/slack-trigger/\", \"color\": \"#18be52\", \"fields\": [{ \"title\": \"Hello1\", \"value\": \"Hello World1\", \"short\": true }, { \"title\": \"Hello2\", \"value\": \"Hello World2\", \"short\": true }] }, { \"title\": \"Attachment2!\", \"title_link\": \"https://argoproj.github.io/argo-events/sensors/triggers/slack-trigger/\", \"color\": \"#18be52\", \"fields\": [{ \"title\": \"Hello1\", \"value\": \"Hello World1\", \"short\": true }, { \"title\": \"Hello2\", \"value\": \"Hello World2\", \"short\": true }] }]","title":"Sending attachments using Slack Attachments API:"},{"location":"sensors/triggers/slack-trigger/#sending-blocks-using-slack-blocks-api","text":"- template : name : slack - trigger slack : blocks : | [{ \"type\" : \"actions\" , \"block_id\" : \"actionblock789\" , \"elements\" : [{ \"type\" : \"datepicker\" , \"action_id\" : \"datepicker123\" , \"initial_date\" : \"1990-04-28\" , \"placeholder\" : { \"type\" : \"plain_text\" , \"text\" : \"Select a date\" } }, { \"type\" : \"overflow\" , \"options\" : [{ \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-0\" }, { \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-1\" }, { \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-2\" }, { \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-3\" }, { \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-4\" } ], \"action_id\" : \"overflow\" }, { \"type\" : \"button\" , \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"Click Me\" }, \"value\" : \"click_me_123\" , \"action_id\" : \"button\" } ] }] The complete specification of Slack trigger is available here .","title":"Sending blocks using Slack Blocks 
API:"},{"location":"tutorials/01-introduction/","text":"Introduction \u00b6 In the tutorials, we will cover every aspect of Argo Events and demonstrate how you can leverage these features to build an event driven workflow pipeline. All the concepts you will learn in this tutorial and subsequent ones can be applied to any type of event-source. Prerequisites \u00b6 Follow the installation guide to set up the Argo Events. Make sure to configure Argo Workflow controller to listen to workflow objects created in argo-events namespace. (See this link.) The Workflow Controller will need to be installed either in a cluster-scope configuration (i.e. no \"--namespaced\" argument) so that it has visibility to all namespaces, or with \"--managed-namespace\" set to define \"argo-events\" as a namespace it has visibility to. To deploy Argo Workflows with a cluster-scope configuration you can use this installation yaml file: kubectl apply - n argo - f https : // github . com / argoproj / argo - workflows / releases / latest / download / install . yaml Make sure to read the concepts behind eventbus . sensor . event source . Follow the instruction to create a Service Account operate-workflow-sa with proper privileges, and make sure the Service Account used by Workflows (here we use default in the tutorials for demonstration purpose) has proper RBAC settings. Get Started \u00b6 We are going to set up a sensor and event-source for webhook. The goal is to trigger an Argo workflow upon an HTTP Post request. Let' set up the eventbus. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml Create the webhook event source. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create the webhook sensor. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/webhook.yaml If the commands are executed successfully, the eventbus, event-source and sensor pods will get created. You will also notice that a service is created for the event-source. Expose the event-source pod via Ingress, OpenShift Route or port forward to consume requests over HTTP. kubectl -n argo-events port-forward 12000:12000 Use either Curl or Postman to send a post request to the http://localhost:12000/example . curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see an Argo workflow being created. kubectl -n argo-events get wf Make sure the workflow pod ran successfully. argo logs - n argo - events @latest Should result in something similar to what is below. _________________________________________ / { \"context\" :{ \"type\" : \"webhook\" , \"specVersi \\ | on\" : \"0.3\" , \"source\" : \"webhook\" , \"e | | ventID\" : \"38376665363064642d343336352d34 | | 3035372d393766662d366234326130656232343 | | 337\" , \"time\" : \"2020-01-11T16:55:42.996636 | | Z\" , \"dataContentType\" : \"application/json\" | | , \"subject\" : \"example\" }, \"data\" : \"eyJoZWFkZ | | XIiOnsiQWNjZXB0IjpbIiovKiJdLCJDb250ZW50 | | LUxlbmd0aCI6WyIzOCJdLCJDb250ZW50LVR5cGU | | iOlsiYXBwbGljYXRpb24vanNvbiJdLCJVc2VyLU | | FnZW50IjpbImN1cmwvNy41NC4wIl19LCJib2R5I | | jp7Im1lc3NhZ2UiOiJ0aGlzIGlzIG15IGZpcnN0 | \\ IHdlYmhvb2sifX0=\" } / ----------------------------------------- \\ \\ \\ ## . 
## ## ## == ## ## ## ## == = / \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" ___ / == = ~~~ { ~~ ~~~~ ~~~ ~~~~ ~~ ~ / == = - ~~~ \\ ______ o __ / \\ \\ __ / \\ ____ \\ ______ / Note: You will see that the message printed in the workflow logs contains both the event context and data, with the data being base64 encoded. In later sections, we will see how to extract a particular key-value from the event context or data and pass it to the workflow as arguments. Troubleshoot \u00b6 If you don't see the event-source and sensor pods in the argo-events namespace, inspect the event-source. kubectl -n argo-events get eventsource event-source-object-name -o yaml Inspect the sensor. kubectl -n argo-events get sensor sensor-object-name -o yaml and look for any errors within the Status . Make sure the correct Role and RoleBindings are applied to the service account and there are no errors in either the event-source or sensor controller. Check the logs of the event-source and sensor controllers. Make sure the controllers have processed the event-source and sensor objects and there are no errors. Raise an issue on GitHub or post a question on the argo-events slack channel.","title":"Introduction"},{"location":"tutorials/01-introduction/#introduction","text":"In the tutorials, we will cover every aspect of Argo Events and demonstrate how you can leverage these features to build an event-driven workflow pipeline. All the concepts you will learn in this tutorial and subsequent ones can be applied to any type of event-source.","title":"Introduction"},{"location":"tutorials/01-introduction/#prerequisites","text":"Follow the installation guide to set up Argo Events. Make sure to configure the Argo Workflow controller to listen to workflow objects created in the argo-events namespace. (See this link.) The Workflow Controller will need to be installed either in a cluster-scope configuration (i.e. no \"--namespaced\" argument) so that it has visibility to all namespaces, or with \"--managed-namespace\" set to define \"argo-events\" as a namespace it has visibility to. To deploy Argo Workflows with a cluster-scope configuration you can use this installation yaml file: kubectl apply - n argo - f https : // github . com / argoproj / argo - workflows / releases / latest / download / install . yaml Make sure to read the concepts behind the eventbus , sensor , and event source . Follow the instructions to create a Service Account operate-workflow-sa with proper privileges, and make sure the Service Account used by Workflows (here we use default in the tutorials for demonstration purposes) has proper RBAC settings.","title":"Prerequisites"},{"location":"tutorials/01-introduction/#get-started","text":"We are going to set up a sensor and event-source for webhook. The goal is to trigger an Argo workflow upon an HTTP POST request. Let's set up the eventbus. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml Create the webhook event source. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create the webhook sensor. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/webhook.yaml If the commands are executed successfully, the eventbus, event-source and sensor pods will get created. You will also notice that a service is created for the event-source. Expose the event-source pod via Ingress, OpenShift Route or port forward to consume requests over HTTP.
kubectl -n argo-events port-forward 12000:12000 Use either Curl or Postman to send a post request to the http://localhost:12000/example . curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see an Argo workflow being created. kubectl -n argo-events get wf Make sure the workflow pod ran successfully. argo logs - n argo - events @latest Should result in something similar to what is below. _________________________________________ / { \"context\" :{ \"type\" : \"webhook\" , \"specVersi \\ | on\" : \"0.3\" , \"source\" : \"webhook\" , \"e | | ventID\" : \"38376665363064642d343336352d34 | | 3035372d393766662d366234326130656232343 | | 337\" , \"time\" : \"2020-01-11T16:55:42.996636 | | Z\" , \"dataContentType\" : \"application/json\" | | , \"subject\" : \"example\" }, \"data\" : \"eyJoZWFkZ | | XIiOnsiQWNjZXB0IjpbIiovKiJdLCJDb250ZW50 | | LUxlbmd0aCI6WyIzOCJdLCJDb250ZW50LVR5cGU | | iOlsiYXBwbGljYXRpb24vanNvbiJdLCJVc2VyLU | | FnZW50IjpbImN1cmwvNy41NC4wIl19LCJib2R5I | | jp7Im1lc3NhZ2UiOiJ0aGlzIGlzIG15IGZpcnN0 | \\ IHdlYmhvb2sifX0=\" } / ----------------------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## == = / \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" ___ / == = ~~~ { ~~ ~~~~ ~~~ ~~~~ ~~ ~ / == = - ~~~ \\ ______ o __ / \\ \\ __ / \\ ____ \\ ______ / Note: You will see the message printed in the workflow logs contains both the event context and data, with data being base64 encoded. In later sections, we will see how to extract particular key-value from event context or data and pass it to the workflow as arguments.","title":"Get Started"},{"location":"tutorials/01-introduction/#troubleshoot","text":"If you don't see the event-source and sensor pod in argo-events namespace, Inspect the event-source. kubectl -n argo-events get eventsource event-source-object-name -o yaml Inspect the sensor. kubectl -n argo-events get sensor sensor-object-name -o yaml and look for any errors within the Status . Make sure the correct Role and RoleBindings are applied to the service account and there are no errors in both event-source and sensor controller. Check the logs of event-source and sensor controller. Make sure the controllers have processed the event-source and sensor objects and there are no errors. Raise an issue on GitHub or post a question on argo-events slack channel.","title":"Troubleshoot"},{"location":"tutorials/02-parameterization/","text":"Parameterization \u00b6 In the previous section, we saw how to set up a basic webhook event-source and sensor. The trigger template had parameters set in the sensor object, and the workflow was able to print the event payload. In this tutorial, we will dig deeper into different types of parameterization, how to extract particular key-value from event payload and how to use default values if certain key is not available within event payload. Trigger Resource Parameterization \u00b6 If you take a closer look at the Sensor object, you will notice it contains a list of triggers. Each Trigger contains the template that defines the context of the trigger and actual resource that we expect the sensor to execute. In the previous section, the resource within the trigger template was an Argo workflow. This subsection deals with how to parameterize the resource within trigger template with the event payload. Prerequisites \u00b6 Make sure to have the basic webhook event-source and sensor set up. Follow the introduction tutorial if haven't done already. 
Webhook Event Payload \u00b6 Webhook event-source consumes events through HTTP requests and transforms them into CloudEvents. The structure of the event the Webhook sensor receives from the event-source over the eventbus looks like the following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } } Context : This is the CloudEvent context and it is populated by the event-source regardless of the type of HTTP request. Data : Data contains the following fields. Header : The header within the event data contains the headers of the HTTP request that was dispatched to the event-source. The event-source extracts the headers from the request and puts them in the header within the event data . Body : This is the request payload from the HTTP request. Event Context \u00b6 Now that we have an understanding of the structure of the event the webhook sensor receives from the event-source over the eventbus, let's see how we can use the event context to parameterize the Argo workflow. Update the Webhook Sensor and add the contextKey for the parameter at index 0. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-01.yaml Send an HTTP request to the event-source pod. curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Inspect the output of the Argo workflow that was created. argo logs name_of_the_workflow You will see the following output, _________ < webhook > --------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ We have successfully extracted the type key within the event context and parameterized the workflow to print the value of the type . Event Data \u00b6 Now, it is time to use the event data and parameterize the Argo workflow trigger. We will extract the message from the request payload and get the Argo workflow to print the message, as the sketch below illustrates.
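A small Go sketch that models the envelope shown above and pulls out the same values that contextKey and dataKey resolve; the sample event is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Envelope mirrors the event structure shown above.
type Envelope struct {
	Context map[string]interface{} `json:"context"`
	Data    struct {
		Header map[string][]string    `json:"header"`
		Body   map[string]interface{} `json:"body"`
	} `json:"data"`
}

func main() {
	// Illustrative event, shaped like the webhook example in this tutorial.
	raw := `{
	  "context": {"type": "webhook", "subject": "example"},
	  "data": {
	    "header": {"Content-Type": ["application/json"]},
	    "body": {"message": "this is my first webhook"}
	  }
	}`

	var ev Envelope
	if err := json.Unmarshal([]byte(raw), &ev); err != nil {
		panic(err)
	}
	// contextKey "type" resolves against the event context ...
	fmt.Println(ev.Context["type"]) // webhook
	// ... while dataKey "body.message" resolves against the event data.
	fmt.Println(ev.Data.Body["message"]) // this is my first webhook
}
```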
Default Values \u00b6 Each parameter comes with an option to configure the default value. This is specially important when the key you defined in the parameter doesn't exist in the event. Update the Webhook Sensor and add the value for the parameter at index 0. We will also update the dataKey to an unknown event key. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-03.yaml Send a HTTP request to the event-source pod. curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Inspect the output of the Argo workflow that was created. argo logs name_of_the_workflow You will see the following output, _______________________ < wow! a default value. > ----------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Sprig Templates \u00b6 The sprig template exposed through contextTemplate and dataTemplate lets you alter the event context and event data before it gets applied to the trigger via parameters . Take a look at the example defined here , it contains the parameters as follows, parameters : # Retrieve the 'message' key from the payload - src : dependencyName : test - dep dataTemplate : \"{{ .Input.body.message | title }}\" dest : spec . arguments . parameters . 0. value # Title case the context subject - src : dependencyName : test - dep contextTemplate : \"{{ .Input.subject | title }}\" dest : spec . arguments . parameters . 1. value # Retrieve the 'name' key from the payload, remove all whitespace and lowercase it. - src : dependencyName : test - dep dataTemplate : \"{{ .Input.body.name | nospace | lower }}-\" dest : metadata . generateName operation : append Consider the event the sensor received has format like, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : { \"name\" : \"foo bar\" , \"message\" : \"hello there!!\" }, } } The parameters are transformed as, The first parameter extracts the body.message from event data and applies title filter which basically capitalizes the first letter and replaces the spec.arguments.parameters.0.value . The second parameter extracts the subject from the event context and again applies title filter and replaces the spec.arguments.parameters.1.value . The third parameter extracts the body.name from the event data, applies nospace filter which removes all white spaces and then lower filter which lowercases the text and appends it to metadata.generateName . Send a curl request to event-source as follows, curl -d '{\"name\":\"foo bar\", \"message\": \"hello there!!\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example and you will see an Argo workflow being sprung with name like webhook-foobar-xxxxx . Check the output of the workflow, it should print something like, ____________________________ < Hello There!! from Example > ---------------------------- \\ \\ \\ ## . 
## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Operations \u00b6 Sometimes you need the ability to append or prepend a parameter value to an existing value in trigger resource. This is where the operation field within a parameter comes handy. Update the Webhook Sensor and add the operation in the parameter at index 0. We will prepend the message to an existing value. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-04.yaml Send a HTTP request to the event-source. curl -d '{\"message\":\"hey!!\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Inspect the output of the Argo workflow that was created. argo logs name_of_the_workflow You will see the following output, __________________ < hey!!hello world > ------------------ \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Trigger Template Parameterization \u00b6 The parameterization you saw above deals with the trigger resource, but sometimes you need to parameterize the trigger template itself. This comes handy when you have the trigger resource stored on some external source like S3, Git, etc. and you need to replace the url of the source on the fly in trigger template. Imagine a scenario where you want to parameterize the parameters of trigger to parameterize the trigger resource. What?... The sensor you have been using in this tutorial has one parameter defined in the trigger resource under k8s . We will parameterize that parameter by applying a parameter at the trigger template level. Update the Webhook Sensor and add parameters at trigger level. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-05.yaml Send a HTTP request to the event-source. curl -d '{\"dependencyName\":\"test-dep\", \"dataKey\": \"body.message\", \"dest\": \"spec.arguments.parameters.0.value\", \"message\": \"amazing!!\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Inspect the output of the Argo workflow that was created. argo logs name_of_the_workflow You will see the following output, ___________ < amazing!! > ----------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Great!! You have now learned how to apply parameters at trigger resource and template level. Keep in mind that you can apply default values and operations like prepend and append for trigger template parameters as well.","title":"Parameterization"},{"location":"tutorials/02-parameterization/#parameterization","text":"In the previous section, we saw how to set up a basic webhook event-source and sensor. The trigger template had parameters set in the sensor object, and the workflow was able to print the event payload. 
{"location":"tutorials/02-parameterization/#parameterization","text":"In the previous section, we saw how to set up a basic webhook event-source and sensor. The trigger template had parameters set in the sensor object, and the workflow was able to print the event payload. In this tutorial, we will dig deeper into the different types of parameterization: how to extract a particular key-value from the event payload, and how to use default values if a certain key is not available within the event payload.","title":"Parameterization"},{"location":"tutorials/02-parameterization/#trigger-resource-parameterization","text":"If you take a closer look at the Sensor object, you will notice it contains a list of triggers. Each Trigger contains the template that defines the context of the trigger and the actual resource that we expect the sensor to execute. In the previous section, the resource within the trigger template was an Argo workflow. This subsection deals with how to parameterize the resource within the trigger template with the event payload.","title":"Trigger Resource Parameterization"},{"location":"tutorials/02-parameterization/#prerequisites","text":"Make sure to have the basic webhook event-source and sensor set up. Follow the introduction tutorial if you haven't done so already.","title":"Prerequisites"},{"location":"tutorials/02-parameterization/#webhook-event-payload","text":"Webhook event-source consumes events through HTTP requests and transforms them into CloudEvents. The structure of the event the Webhook sensor receives from the event-source over the eventbus looks like the following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } } Context : This is the CloudEvent context and it is populated by the event-source regardless of the type of HTTP request. Data : Data contains the following fields. Header : The header within the event data contains the headers in the HTTP request that was dispatched to the event-source. The event-source extracts the headers from the request and puts them in the header within the event data . Body : This is the request payload from the HTTP request.","title":"Webhook Event Payload"},{"location":"tutorials/02-parameterization/#event-context","text":"Now that we have an understanding of the structure of the event the webhook sensor receives from the event-source over the eventbus, let's see how we can use the event context to parameterize the Argo workflow. Update the Webhook Sensor and add the contextKey for the parameter at index 0. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-01.yaml Send an HTTP request to the event-source pod. curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Inspect the output of the Argo workflow that was created. argo logs name_of_the_workflow You will see the following output, _________ < webhook > --------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ We have successfully extracted the type key within the event context and parameterized the workflow to print the value of the type .","title":"Event Context"},{"location":"tutorials/02-parameterization/#event-data","text":"Now, it is time to use the event data and parameterize the Argo workflow trigger. We will extract the message from the request payload and get the Argo workflow to print the message. 
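Conceptually, the parameter we are about to add maps an event-payload key onto a path in the trigger resource; a minimal sketch (illustrative, not the exact contents of sensor-02.yaml):

```yaml
# A trigger-resource parameter: copy `body.message` from the event data
# into the first workflow argument.
parameters:
  - src:
      dependencyName: test-dep       # assumed dependency name
      dataKey: body.message          # path inside the event `data`
      # value: some-default          # optional default, used when the key is missing
    dest: spec.arguments.parameters.0.value
```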
Update the Webhook Sensor and add the dataKey in the parameter at index 0. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-02.yaml Send an HTTP request to the event-source pod. curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Inspect the output of the Argo workflow that was created. argo logs name_of_the_workflow You will see the following output, __________________________ < this is my first webhook > -------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Yay!! The Argo workflow printed the message. You can add as many parameters as you need to update the trigger resource on the fly. Note : If you define both the contextKey and dataKey within a parameter, then the dataKey takes precedence. Note : When useRawData is not specified or explicitly set to false, the parameter will resolve to a string type. When useRawData is set to true, a number, boolean, JSON or string parameter may be resolved.","title":"Event Data"},{"location":"tutorials/02-parameterization/#default-values","text":"Each parameter comes with an option to configure the default value. This is especially important when the key you defined in the parameter doesn't exist in the event. Update the Webhook Sensor and add the value for the parameter at index 0. We will also update the dataKey to an unknown event key. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-03.yaml Send an HTTP request to the event-source pod. curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Inspect the output of the Argo workflow that was created. argo logs name_of_the_workflow You will see the following output, _______________________ < wow! a default value. > ----------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/","title":"Default Values"},{"location":"tutorials/02-parameterization/#sprig-templates","text":"The Sprig template exposed through contextTemplate and dataTemplate lets you alter the event context and event data before it gets applied to the trigger via parameters . Take a look at the example defined here ; it contains the following parameters, parameters : # Retrieve the 'message' key from the payload - src : dependencyName : test - dep dataTemplate : \"{{ .Input.body.message | title }}\" dest : spec . arguments . parameters . 0. value # Title case the context subject - src : dependencyName : test - dep contextTemplate : \"{{ .Input.subject | title }}\" dest : spec . arguments . parameters . 1. value # Retrieve the 'name' key from the payload, remove all whitespace and lowercase it. - src : dependencyName : test - dep dataTemplate : \"{{ .Input.body.name | nospace | lower }}-\" dest : metadata . 
generateName operation : append Consider that the event the sensor received has a format like, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : { \"name\" : \"foo bar\" , \"message\" : \"hello there!!\" }, } } The parameters are transformed as follows, The first parameter extracts body.message from the event data, applies the title filter (which capitalizes the first letter), and replaces spec.arguments.parameters.0.value . The second parameter extracts the subject from the event context, applies the same title filter, and replaces spec.arguments.parameters.1.value . The third parameter extracts body.name from the event data, applies the nospace filter (which removes all whitespace) and then the lower filter (which lowercases the text), and appends the result to metadata.generateName . Send a curl request to the event-source as follows, curl -d '{\"name\":\"foo bar\", \"message\": \"hello there!!\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example and you will see an Argo workflow spring up with a name like webhook-foobar-xxxxx . Check the output of the workflow; it should print something like, ____________________________ < Hello There!! from Example > ---------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/","title":"Sprig Templates"},{"location":"tutorials/02-parameterization/#operations","text":"Sometimes you need the ability to append or prepend a parameter value to an existing value in the trigger resource. This is where the operation field within a parameter comes in handy. Update the Webhook Sensor and add the operation in the parameter at index 0. We will prepend the message to an existing value. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-04.yaml Send an HTTP request to the event-source. curl -d '{\"message\":\"hey!!\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Inspect the output of the Argo workflow that was created. argo logs name_of_the_workflow You will see the following output, __________________ < hey!!hello world > ------------------ \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/","title":"Operations"},{"location":"tutorials/02-parameterization/#trigger-template-parameterization","text":"The parameterization you saw above deals with the trigger resource, but sometimes you need to parameterize the trigger template itself. This comes in handy when you have the trigger resource stored on some external source like S3, Git, etc. and you need to replace the URL of the source on the fly in the trigger template. Imagine a scenario where you want to parameterize the parameters of the trigger that themselves parameterize the trigger resource. What?... The sensor you have been using in this tutorial has one parameter defined in the trigger resource under k8s . We will parameterize that parameter by applying a parameter at the trigger template level. Update the Webhook Sensor and add parameters at the trigger level. 
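Roughly, trigger-level parameters sit beside the trigger template and rewrite the template itself before it runs; a sketch under assumed dest paths (the real paths live in sensor-05.yaml):

```yaml
triggers:
  - template:
      name: workflow-trigger
      k8s: {}                        # resource and resource-level parameters as before (elided)
    # Trigger-level parameters: each dest path below targets a field of the
    # template above. These paths are illustrative assumptions.
    parameters:
      - src:
          dependencyName: test-dep
          dataKey: body.dataKey
        dest: k8s.parameters.0.src.dataKey
      - src:
          dependencyName: test-dep
          dataKey: body.dest
        dest: k8s.parameters.0.dest
```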
kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-05.yaml Send an HTTP request to the event-source. curl -d '{\"dependencyName\":\"test-dep\", \"dataKey\": \"body.message\", \"dest\": \"spec.arguments.parameters.0.value\", \"message\": \"amazing!!\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Inspect the output of the Argo workflow that was created. argo logs name_of_the_workflow You will see the following output, ___________ < amazing!! > ----------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Great!! You have now learned how to apply parameters at the trigger resource and template level. Keep in mind that you can apply default values and operations like prepend and append for trigger template parameters as well.","title":"Trigger Template Parameterization"},{"location":"tutorials/03-trigger-sources/","text":"Trigger Sources \u00b6 A trigger source is the source of the trigger resource. It can be either an external source such as Git , S3 , K8s Configmap , File , or any valid URL that hosts the resource, or an internal resource which is defined in the sensor object itself, like Inline or Resource . In the previous sections, you have been dealing with the Resource trigger source. In this tutorial, we will explore other trigger sources. Prerequisites \u00b6 The Webhook event-source is already set up. Git \u00b6 The Git trigger source refers to a K8s resource stored in Git. The specification for the Git source is available here . In order to fetch data from git, you need to set up the private SSH key in the sensor. If you don't have SSH keys available, create them following this guide . Create a K8s secret that holds the SSH keys. kubectl -n argo-events create secret generic git-ssh --from-file=key=.ssh/ Create a K8s secret that holds known hosts. kubectl -n argo-events create secret generic git-known-hosts --from-file=ssh_known_hosts=.ssh/known_hosts Create a sensor with the git trigger source and refer it to the hello world workflow stored on the Argo Git project. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/03-trigger-sources/sensor-git.yaml Use either Curl or Postman to send a post request to the http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see an Argo workflow being created. kubectl -n argo-events get wf S3 \u00b6 You can refer to a K8s resource stored on an S3-compliant store as the trigger source. For this tutorial, let's set up a minio server, which is an S3-compliant store. Create a K8s secret called artifacts-minio that holds your minio access key and secret key. The access key must be stored under the accesskey key and the secret key must be stored under secretkey . Follow the steps described here to set up the minio server. Make sure a service is available to expose the minio server. Create a bucket called workflows and store a basic hello world Argo workflow with key name hello-world.yaml . Create the sensor with the trigger source as S3. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/03-trigger-sources/sensor-minio.yaml Use either Curl or Postman to send a post request to the http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see an Argo workflow being created. kubectl -n argo-events get wf 
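For reference, the S3 trigger source in such a sensor points the trigger at the bucket and key created above; a rough sketch (the endpoint, insecure flag, and secret layout are assumptions that depend on your minio setup):

```yaml
k8s:
  operation: create
  source:
    s3:
      bucket:
        name: workflows            # the bucket created above
        key: hello-world.yaml      # the stored workflow manifest
      endpoint: minio-service.argo-events:9000
      insecure: true               # assumption: plain-HTTP in-cluster minio
      accessKey:
        name: artifacts-minio      # the secret created above
        key: accesskey
      secretKey:
        name: artifacts-minio
        key: secretkey
```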
K8s Configmap \u00b6 A K8s configmap can be treated as a trigger source if needed. Let's create a configmap called trigger-store . kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/03-trigger-sources/trigger-store.yaml Create a sensor with the trigger source as configmap and refer it to the trigger-store . kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/03-trigger-sources/sensor-cm.yaml Use either Curl or Postman to send a post request to the http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see an Argo workflow being created. kubectl -n argo-events get wf File & URL \u00b6 File and URL trigger sources are pretty self-explanatory. The example sensors are available under the examples/sensors folder.","title":"Trigger Sources"},{"location":"tutorials/03-trigger-sources/#trigger-sources","text":"A trigger source is the source of the trigger resource. It can be either an external source such as Git , S3 , K8s Configmap , File , or any valid URL that hosts the resource, or an internal resource which is defined in the sensor object itself, like Inline or Resource . In the previous sections, you have been dealing with the Resource trigger source. In this tutorial, we will explore other trigger sources.","title":"Trigger Sources"},{"location":"tutorials/03-trigger-sources/#prerequisites","text":"The Webhook event-source is already set up.","title":"Prerequisites"},{"location":"tutorials/03-trigger-sources/#git","text":"The Git trigger source refers to a K8s resource stored in Git. The specification for the Git source is available here . In order to fetch data from git, you need to set up the private SSH key in the sensor. If you don't have SSH keys available, create them following this guide . Create a K8s secret that holds the SSH keys. kubectl -n argo-events create secret generic git-ssh --from-file=key=.ssh/ Create a K8s secret that holds known hosts. kubectl -n argo-events create secret generic git-known-hosts --from-file=ssh_known_hosts=.ssh/known_hosts Create a sensor with the git trigger source and refer it to the hello world workflow stored on the Argo Git project. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/03-trigger-sources/sensor-git.yaml Use either Curl or Postman to send a post request to the http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see an Argo workflow being created. kubectl -n argo-events get wf","title":"Git"},{"location":"tutorials/03-trigger-sources/#s3","text":"You can refer to a K8s resource stored on an S3-compliant store as the trigger source. For this tutorial, let's set up a minio server, which is an S3-compliant store. Create a K8s secret called artifacts-minio that holds your minio access key and secret key. The access key must be stored under the accesskey key and the secret key must be stored under secretkey . Follow the steps described here to set up the minio server. Make sure a service is available to expose the minio server. 
Create a bucket called workflows and store a basic hello world Argo workflow with key name hello-world.yaml . Create the sensor with the trigger source as S3. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/03-trigger-sources/sensor-minio.yaml Use either Curl or Postman to send a post request to the http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see an Argo workflow being created. kubectl -n argo-events get wf","title":"S3"},{"location":"tutorials/03-trigger-sources/#k8s-configmap","text":"A K8s configmap can be treated as a trigger source if needed. Let's create a configmap called trigger-store . kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/03-trigger-sources/trigger-store.yaml Create a sensor with the trigger source as configmap and refer it to the trigger-store . kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/03-trigger-sources/sensor-cm.yaml Use either Curl or Postman to send a post request to the http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see an Argo workflow being created. kubectl -n argo-events get wf","title":"K8s Configmap"},{"location":"tutorials/03-trigger-sources/#file-url","text":"File and URL trigger sources are pretty self-explanatory. The example sensors are available under the examples/sensors folder.","title":"File & URL"},{"location":"tutorials/04-standard-k8s-resources/","text":"Trigger Standard K8s Resources \u00b6 In the previous sections, you saw how to trigger the Argo workflows. In this tutorial, you will see how to trigger a Pod and a Deployment. Note: You can trigger any standard Kubernetes object. Having the ability to trigger standard Kubernetes resources is quite powerful as it provides an avenue to set up pipelines for existing workloads. Prerequisites \u00b6 Make sure that the service account used by the Sensor has the necessary permissions to create the Kubernetes resource of your choice. We use k8s-resource-sa for the examples below; it should be bound to a Role like the following. apiVersion : rbac . authorization . k8s . io / v1 kind : Role metadata : name : create - deploy - pod - role rules : - apiGroups : - \"\" resources : - pods verbs : - create - apiGroups : - apps resources : - deployments verbs : - create The Webhook event-source is already set up. Pod \u00b6 Create a sensor with a K8s trigger. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/04-standard-k8s-resources/sensor-pod.yaml Use either Curl or Postman to send a post request to the http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see a pod being created. kubectl -n argo-events get po After the pod has completed, inspect the logs of the pod; you will see something similar to the below. 
_________________________________________ / { \"context\" :{ \"type\" : \"webhook\" , \"specVersi \\ | on\" : \"0.3\" , \"source\" : \"webhook\" , \"e | | ventID\" : \"30306463666539362d346666642d34 | | 3336332d383861312d336538363333613564313 | | 932\" , \"time\" : \"2020-01-11T21:23:07.682961 | | Z\" , \"dataContentType\" : \"application/json\" | | , \"subject\" : \"example\" }, \"data\" : \"eyJoZWFkZ | | XIiOnsiQWNjZXB0IjpbIiovKiJdLCJDb250ZW50 | | LUxlbmd0aCI6WyIxOSJdLCJDb250ZW50LVR5cGU | | iOlsiYXBwbGljYXRpb24vanNvbiJdLCJVc2VyLU | | FnZW50IjpbImN1cmwvNy41NC4wIl19LCJib2R5I | \\ jp7Im1lc3NhZ2UiOiJoZXkhISJ9fQ==\" } / ----------------------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## == = / \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" ___ / == = ~~~ { ~~ ~~~~ ~~~ ~~~~ ~~ ~ / == = - ~~~ \\ ______ o __ / \\ \\ __ / \\ ____ \\ ______ / Deployment \u00b6 Let's create a sensor with a K8s deployment as the trigger. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/04-standard-k8s-resources/sensor-deployment.yaml Use either Curl or Postman to send a post request to the http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see a deployment being created. Get the corresponding pod. kubectl -n argo-events get deployments After the pod has completed, inspect the logs of the pod; you will see something similar to the below. _________________________________________ / { \"context\" :{ \"type\" : \"webhook\" , \"specVersi \\ | on\" : \"0.3\" , \"source\" : \"webhook\" , \"e | | ventID\" : \"30306463666539362d346666642d34 | | 3336332d383861312d336538363333613564313 | | 932\" , \"time\" : \"2020-01-11T21:23:07.682961 | | Z\" , \"dataContentType\" : \"application/json\" | | , \"subject\" : \"example\" }, \"data\" : \"eyJoZWFkZ | | XIiOnsiQWNjZXB0IjpbIiovKiJdLCJDb250ZW50 | | LUxlbmd0aCI6WyIxOSJdLCJDb250ZW50LVR5cGU | | iOlsiYXBwbGljYXRpb24vanNvbiJdLCJVc2VyLU | | FnZW50IjpbImN1cmwvNy41NC4wIl19LCJib2R5I | \\ jp7Im1lc3NhZ2UiOiJoZXkhISJ9fQ==\" } / ----------------------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## == = / \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" ___ / == = ~~~ { ~~ ~~~~ ~~~ ~~~~ ~~ ~ / == = - ~~~ \\ ______ o __ / \\ \\ __ / \\ ____ \\ ______ /","title":"Trigger Standard K8s Resources"},{"location":"tutorials/04-standard-k8s-resources/#trigger-standard-k8s-resources","text":"In the previous sections, you saw how to trigger the Argo workflows. In this tutorial, you will see how to trigger a Pod and a Deployment. Note: You can trigger any standard Kubernetes object. Having the ability to trigger standard Kubernetes resources is quite powerful as it provides an avenue to set up pipelines for existing workloads.","title":"Trigger Standard K8s Resources"},{"location":"tutorials/04-standard-k8s-resources/#prerequisites","text":"Make sure that the service account used by the Sensor has the necessary permissions to create the Kubernetes resource of your choice. We use k8s-resource-sa for the examples below; it should be bound to a Role like the following. apiVersion : rbac . authorization . k8s . io / v1 kind : Role metadata : name : create - deploy - pod - role rules : - apiGroups : - \"\" resources : - pods verbs : - create - apiGroups : - apps resources : - deployments verbs : - create The Webhook event-source is already set up.","title":"Prerequisites"},{"location":"tutorials/04-standard-k8s-resources/#pod","text":"Create a sensor with a K8s trigger. 
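Before applying it, it helps to see the shape of such a trigger; a minimal sketch (the resource body is illustrative, not the exact contents of sensor-pod.yaml):

```yaml
triggers:
  - template:
      name: webhook-pod-trigger         # assumed trigger name
      k8s:
        operation: create
        source:
          resource:
            apiVersion: v1
            kind: Pod
            metadata:
              generateName: webhook-pod-   # assumed name prefix
            spec:
              containers:
                - name: hello
                  image: busybox           # assumption: any image that can print the payload
                  command: [echo]
                  args: ["hello from the webhook"]
              restartPolicy: Never
```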
kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/04-standard-k8s-resources/sensor-pod.yaml Use either Curl or Postman to send a post request to the http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see a pod being created. kubectl -n argo-events get po After the pod has completed, inspect the logs of the pod; you will see something similar to the below. _________________________________________ / { \"context\" :{ \"type\" : \"webhook\" , \"specVersi \\ | on\" : \"0.3\" , \"source\" : \"webhook\" , \"e | | ventID\" : \"30306463666539362d346666642d34 | | 3336332d383861312d336538363333613564313 | | 932\" , \"time\" : \"2020-01-11T21:23:07.682961 | | Z\" , \"dataContentType\" : \"application/json\" | | , \"subject\" : \"example\" }, \"data\" : \"eyJoZWFkZ | | XIiOnsiQWNjZXB0IjpbIiovKiJdLCJDb250ZW50 | | LUxlbmd0aCI6WyIxOSJdLCJDb250ZW50LVR5cGU | | iOlsiYXBwbGljYXRpb24vanNvbiJdLCJVc2VyLU | | FnZW50IjpbImN1cmwvNy41NC4wIl19LCJib2R5I | \\ jp7Im1lc3NhZ2UiOiJoZXkhISJ9fQ==\" } / ----------------------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## == = / \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" ___ / == = ~~~ { ~~ ~~~~ ~~~ ~~~~ ~~ ~ / == = - ~~~ \\ ______ o __ / \\ \\ __ / \\ ____ \\ ______ /","title":"Pod"},{"location":"tutorials/04-standard-k8s-resources/#deployment","text":"Let's create a sensor with a K8s deployment as the trigger. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/04-standard-k8s-resources/sensor-deployment.yaml Use either Curl or Postman to send a post request to the http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see a deployment being created. Get the corresponding pod. kubectl -n argo-events get deployments After the pod has completed, inspect the logs of the pod; you will see something similar to the below. _________________________________________ / { \"context\" :{ \"type\" : \"webhook\" , \"specVersi \\ | on\" : \"0.3\" , \"source\" : \"webhook\" , \"e | | ventID\" : \"30306463666539362d346666642d34 | | 3336332d383861312d336538363333613564313 | | 932\" , \"time\" : \"2020-01-11T21:23:07.682961 | | Z\" , \"dataContentType\" : \"application/json\" | | , \"subject\" : \"example\" }, \"data\" : \"eyJoZWFkZ | | XIiOnsiQWNjZXB0IjpbIiovKiJdLCJDb250ZW50 | | LUxlbmd0aCI6WyIxOSJdLCJDb250ZW50LVR5cGU | | iOlsiYXBwbGljYXRpb24vanNvbiJdLCJVc2VyLU | | FnZW50IjpbImN1cmwvNy41NC4wIl19LCJib2R5I | \\ jp7Im1lc3NhZ2UiOiJoZXkhISJ9fQ==\" } / ----------------------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## == = / \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" ___ / == = ~~~ { ~~ ~~~~ ~~~ ~~~~ ~~ ~ / == = - ~~~ \\ ______ o __ / \\ \\ __ / \\ ____ \\ ______ /","title":"Deployment"},{"location":"tutorials/05-trigger-custom-resources/","text":"Trigger Custom Resources \u00b6 Take a look at Build Your Own Trigger to customize the sensor.","title":"Trigger Custom Resources"},{"location":"tutorials/05-trigger-custom-resources/#trigger-custom-resources","text":"Take a look at Build Your Own Trigger to customize the sensor.","title":"Trigger Custom Resources"},{"location":"tutorials/06-trigger-conditions/","text":"Trigger Conditions \u00b6 In the previous sections, you have been dealing with just a single dependency. 
But, in many cases, you want to wait for multiple events to occur and then trigger a resource, which means you need a mechanism to determine which triggers to execute based on a set of different event dependencies. This mechanism is supported through conditions . Note : Whenever you define multiple dependencies in a sensor, the sensor applies an AND operation, meaning it will wait for all dependencies to resolve before it executes triggers. conditions can modify that behavior. Prerequisite \u00b6 A Minio server must be set up in the argo-events namespace with a bucket called test and it should be available at minio-service.argo-events:9000 . Conditions \u00b6 Consider a scenario where you have a Webhook and a Minio event-source, and you want to trigger an Argo workflow if the sensor receives an event from the Webhook event-source, but another workflow if it receives an event from the Minio event-source. Create the webhook event-source. The event-source listens to HTTP requests on port 12000 . kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/webhook-event-source.yaml Create the minio event-source. The event-source listens to events of type PUT and DELETE for objects in the bucket test . kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/minio-event-source.yaml Make sure there are no errors in any of the event-sources. Let's create the sensor. If you take a closer look at the trigger templates, you will notice that each contains a field named conditions , which is a boolean expression containing dependency names. So, as soon as the expression resolves to true, the corresponding trigger will be executed. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/sensor-01.yaml Send an HTTP request to the Webhook event-source. curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice an Argo workflow with the name group-1-xxxx is created with the following output, __________________________ < this is my first webhook > -------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Now, let's generate a Minio event so that we can run the group-2-xxxx workflow. Drop a file onto the test bucket. The workflow that gets created will print the name of the bucket as follows, ______ < test > ------ \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Great!! You have now learned how to use conditions . Let's update the sensor with a trigger that waits for both dependencies to resolve. This is the normal sensor behavior if conditions is not defined. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/sensor-02.yaml Send an HTTP request and perform a file drop on the Minio bucket as done above. You should get the following output. _______________________________ < this is my first webhook test > ------------------------------- \\ \\ \\ ## . 
## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/","title":"Trigger Conditions"},{"location":"tutorials/06-trigger-conditions/#trigger-conditions","text":"In the previous sections, you have been dealing with just a single dependency. But, in many cases, you want to wait for multiple events to occur and then trigger a resource, which means you need a mechanism to determine which triggers to execute based on a set of different event dependencies. This mechanism is supported through conditions . Note : Whenever you define multiple dependencies in a sensor, the sensor applies an AND operation, meaning it will wait for all dependencies to resolve before it executes triggers. conditions can modify that behavior.","title":"Trigger Conditions"},{"location":"tutorials/06-trigger-conditions/#prerequisite","text":"A Minio server must be set up in the argo-events namespace with a bucket called test and it should be available at minio-service.argo-events:9000 .","title":"Prerequisite"},{"location":"tutorials/06-trigger-conditions/#conditions","text":"Consider a scenario where you have a Webhook and a Minio event-source, and you want to trigger an Argo workflow if the sensor receives an event from the Webhook event-source, but another workflow if it receives an event from the Minio event-source. Create the webhook event-source. The event-source listens to HTTP requests on port 12000 . kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/webhook-event-source.yaml Create the minio event-source. The event-source listens to events of type PUT and DELETE for objects in the bucket test . kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/minio-event-source.yaml Make sure there are no errors in any of the event-sources. Let's create the sensor. If you take a closer look at the trigger templates, you will notice that each contains a field named conditions , which is a boolean expression containing dependency names. So, as soon as the expression resolves to true, the corresponding trigger will be executed. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/sensor-01.yaml Send an HTTP request to the Webhook event-source. curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice an Argo workflow with the name group-1-xxxx is created with the following output, __________________________ < this is my first webhook > -------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Now, let's generate a Minio event so that we can run the group-2-xxxx workflow. Drop a file onto the test bucket. The workflow that gets created will print the name of the bucket as follows, ______ < test > ------ \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Great!! You have now learned how to use conditions . Let's update the sensor with a trigger that waits for both dependencies to resolve. This is the normal sensor behavior if conditions is not defined. 
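The difference between the two sensors boils down to the conditions expression on each trigger template; roughly (dependency names are illustrative):

```yaml
# sensor-01 style: two triggers, each gated on a single dependency.
triggers:
  - template:
      name: group-1
      conditions: webhook-dep            # fires on the webhook event alone
      # k8s trigger printing the webhook message (elided)
  - template:
      name: group-2
      conditions: minio-dep              # fires on the minio event alone
      # k8s trigger printing the bucket name (elided)
# sensor-02 style: one trigger waiting on both dependencies,
# e.g. `conditions: webhook-dep && minio-dep` (equivalent to omitting
# conditions, since AND across all dependencies is the default).
```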
kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/sensor-02.yaml Send an HTTP request and perform a file drop on the Minio bucket as done above. You should get the following output. _______________________________ < this is my first webhook test > ------------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/","title":"Conditions"},{"location":"tutorials/07-policy/","text":"Policy \u00b6 A policy for a trigger determines whether the trigger resulted in success or failure. Currently, Argo Events supports 2 types of policies: Policy based on the K8s resource labels. Policy based on the response status for triggers like HTTP request, AWS Lambda, etc. Resource Labels Policy \u00b6 This type of policy determines whether the trigger completed successfully based on the labels set on the trigger resource. Consider a sensor which has an Argo workflow as the trigger. When an Argo workflow completes successfully, the workflow controller sets a label on the resource as workflows.argoproj.io/completed: 'true' . So, in order for the sensor to determine whether the trigger workflow completed successfully, you just need to set the policy labels as workflows.argoproj.io/completed: 'true' under the trigger template. In addition to labels, you can also define a backoff and an option to error out if the sensor is unable to determine the status of the trigger after the backoff completes. Check out the specification of the resource labels policy here . Status Policy \u00b6 For triggers like HTTP request or AWS Lambda, you can apply the Status Policy to determine the trigger status. The Status Policy supports a list of expected response statuses. If the status of the HTTP request or Lambda is within the statuses defined in the policy, then the trigger is considered successful. The complete specification is available here .","title":"Policy"},{"location":"tutorials/07-policy/#policy","text":"A policy for a trigger determines whether the trigger resulted in success or failure. Currently, Argo Events supports 2 types of policies: Policy based on the K8s resource labels. Policy based on the response status for triggers like HTTP request, AWS Lambda, etc.","title":"Policy"},{"location":"tutorials/07-policy/#resource-labels-policy","text":"This type of policy determines whether the trigger completed successfully based on the labels set on the trigger resource. Consider a sensor which has an Argo workflow as the trigger. When an Argo workflow completes successfully, the workflow controller sets a label on the resource as workflows.argoproj.io/completed: 'true' . So, in order for the sensor to determine whether the trigger workflow completed successfully, you just need to set the policy labels as workflows.argoproj.io/completed: 'true' under the trigger template. In addition to labels, you can also define a backoff and an option to error out if the sensor is unable to determine the status of the trigger after the backoff completes. Check out the specification of the resource labels policy here .","title":"Resource Labels Policy"},{"location":"tutorials/07-policy/#status-policy","text":"For triggers like HTTP request or AWS Lambda, you can apply the Status Policy to determine the trigger status. The Status Policy supports a list of expected response statuses. 
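To make the two policy types concrete, here is a rough sketch of each on a trigger (field names follow the specifications linked above; the backoff values are illustrative):

```yaml
# Resource-labels policy: the trigger succeeds once the workflow controller
# sets the completion label on the created resource.
policy:
  k8s:
    labels:
      workflows.argoproj.io/completed: "true"
    backoff:
      duration: 10s                  # illustrative backoff values
      factor: 2
      steps: 5
    errorOnBackoffTimeout: true
---
# Status policy (for HTTP / AWS Lambda triggers): the trigger succeeds when
# the response status is in the allowed list. The `allow` field name is an
# assumption based on the linked specification.
policy:
  status:
    allow:
      - 200
      - 201
```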
If the status of the HTTP request or Lambda is within the statuses defined in the policy, then the trigger is considered successful. The complete specification is available here .","title":"Status Policy"}]} \ No newline at end of file +{"config":{"indexing":"full","lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"Argo Events - The Event-driven Workflow Automation Framework \u00b6 What is Argo Events? \u00b6 Argo Events is an event-driven workflow automation framework for Kubernetes which helps you trigger K8s objects, Argo Workflows, Serverless workloads, etc. on events from a variety of sources like webhooks, S3, schedules, messaging queues, gcp pubsub, sns, sqs, etc. Features \u00b6 Supports events from 20+ event sources. Ability to customize business-level constraint logic for workflow automation. Manage everything from simple, linear, real-time to complex, multi-source events. Supports Kubernetes Objects, Argo Workflow, AWS Lambda, Serverless, etc. as triggers. CloudEvents compliant. Getting Started \u00b6 Follow these instructions to set up Argo Events. Documentation \u00b6 Concepts . Argo Events in action . Deep dive into Argo Events . Triggers \u00b6 Argo Workflows Standard K8s Objects HTTP Requests / Serverless Workloads (OpenFaaS, Kubeless, KNative etc.) AWS Lambda NATS Messages Kafka Messages Slack Notifications Azure Event Hubs Messages Argo Rollouts Custom Trigger / Build Your Own Trigger Apache OpenWhisk Log Trigger Event Sources \u00b6 Argo Events supports 20+ event sources. The complete list of event sources is available here . Who uses Argo Events? \u00b6 Check the list to see who is officially using Argo Events. Please send a PR with your organization name if you are using Argo Events. Community Blogs and Presentations \u00b6 Automation of Everything - How To Combine Argo Events, Workflows & Pipelines, CD, and Rollouts Argo Events - Event-Based Dependency Manager for Kubernetes Argo Events Deep-dive Automating Research Workflows at BlackRock Designing A Complete CI/CD Pipeline CI/CD Pipeline Using Argo Events, Workflows, and CD TGI Kubernetes with Joe Beda: CloudEvents and Argo Events","title":"Home"},{"location":"#argo-events-the-event-driven-workflow-automation-framework","text":"","title":"Argo Events - The Event-driven Workflow Automation Framework"},{"location":"#what-is-argo-events","text":"Argo Events is an event-driven workflow automation framework for Kubernetes which helps you trigger K8s objects, Argo Workflows, Serverless workloads, etc. on events from a variety of sources like webhooks, S3, schedules, messaging queues, gcp pubsub, sns, sqs, etc.","title":"What is Argo Events?"},{"location":"#features","text":"Supports events from 20+ event sources. Ability to customize business-level constraint logic for workflow automation. Manage everything from simple, linear, real-time to complex, multi-source events. Supports Kubernetes Objects, Argo Workflow, AWS Lambda, Serverless, etc. as triggers. CloudEvents compliant.","title":"Features"},{"location":"#getting-started","text":"Follow these instructions to set up Argo Events.","title":"Getting Started"},{"location":"#documentation","text":"Concepts . Argo Events in action . Deep dive into Argo Events .","title":"Documentation"},{"location":"#triggers","text":"Argo Workflows Standard K8s Objects HTTP Requests / Serverless Workloads (OpenFaaS, Kubeless, KNative etc.) 
AWS Lambda NATS Messages Kafka Messages Slack Notifications Azure Event Hubs Messages Argo Rollouts Custom Trigger / Build Your Own Trigger Apache OpenWhisk Log Trigger","title":"Triggers"},{"location":"#event-sources","text":"Argo Events supports 20+ event sources. The complete list of event sources is available here .","title":"Event Sources"},{"location":"#who-uses-argo-events","text":"Check the list to see who are officially using Argo Events. Please send a PR with your organization name if you are using Argo Events.","title":"Who uses Argo Events?"},{"location":"#community-blogs-and-presentations","text":"Automation of Everything - How To Combine Argo Events, Workflows & Pipelines, CD, and Rollouts Argo Events - Event-Based Dependency Manager for Kubernetes Argo Events Deep-dive Automating Research Workflows at BlackRock Designing A Complete CI/CD Pipeline CI/CD Pipeline Using Argo Events, Workflows, and CD TGI Kubernetes with Joe Beda: CloudEvents and Argo Events","title":"Community Blogs and Presentations"},{"location":"APIs/","text":"Packages: argoproj.io/v1alpha1 argoproj.io/v1alpha1 Resource Types: AMQPConsumeConfig ( Appears on: AMQPEventSource ) AMQPConsumeConfig holds the configuration to immediately starts delivering queued messages Field Description consumerTag string (Optional) ConsumerTag is the identity of the consumer included in every delivery autoAck bool (Optional) AutoAck when true, the server will acknowledge deliveries to this consumer prior to writing the delivery to the network exclusive bool (Optional) Exclusive when true, the server will ensure that this is the sole consumer from this queue noLocal bool (Optional) NoLocal flag is not supported by RabbitMQ noWait bool (Optional) NowWait when true, do not wait for the server to confirm the request and immediately begin deliveries AMQPEventSource ( Appears on: EventSourceSpec ) AMQPEventSource refers to an event-source for AMQP stream events Field Description url string URL for rabbitmq service exchangeName string ExchangeName is the exchange name For more information, visit https://www.rabbitmq.com/tutorials/amqp-concepts.html exchangeType string ExchangeType is rabbitmq exchange type routingKey string Routing key for bindings connectionBackoff Backoff (Optional) Backoff holds parameters applied to connection. jsonBody bool (Optional) JSONBody specifies that all event body payload coming from this source will be JSON tls TLSConfig (Optional) TLS configuration for the amqp client. metadata map\\[string\\]string (Optional) Metadata holds the user defined metadata which will passed along the event payload. exchangeDeclare AMQPExchangeDeclareConfig (Optional) ExchangeDeclare holds the configuration for the exchange on the server For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.ExchangeDeclare queueDeclare AMQPQueueDeclareConfig (Optional) QueueDeclare holds the configuration of a queue to hold messages and deliver to consumers. 
Declaring creates a queue if it doesn\u2019t already exist, or ensures that an existing queue matches the same parameters. For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.QueueDeclare queueBind AMQPQueueBindConfig (Optional) QueueBind holds the configuration that binds an exchange to a queue so that publishings to the exchange will be routed to the queue when the publishing routing key matches the binding routing key For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.QueueBind consume AMQPConsumeConfig (Optional) Consume holds the configuration to immediately starts delivering queued messages For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.Consume auth BasicAuth (Optional) Auth hosts secret selectors for username and password urlSecret Kubernetes core/v1.SecretKeySelector URLSecret is secret reference for rabbitmq service URL filter EventSourceFilter (Optional) Filter AMQPExchangeDeclareConfig ( Appears on: AMQPEventSource ) AMQPExchangeDeclareConfig holds the configuration for the exchange on the server Field Description durable bool (Optional) Durable keeps the exchange also after the server restarts autoDelete bool (Optional) AutoDelete removes the exchange when no bindings are active internal bool (Optional) Internal when true does not accept publishings noWait bool (Optional) NowWait when true does not wait for a confirmation from the server AMQPQueueBindConfig ( Appears on: AMQPEventSource ) AMQPQueueBindConfig holds the configuration that binds an exchange to a queue so that publishings to the exchange will be routed to the queue when the publishing routing key matches the binding routing key Field Description noWait bool (Optional) NowWait false and the queue could not be bound, the channel will be closed with an error AMQPQueueDeclareConfig ( Appears on: AMQPEventSource ) AMQPQueueDeclareConfig holds the configuration of a queue to hold messages and deliver to consumers. Declaring creates a queue if it doesn\u2019t already exist, or ensures that an existing queue matches the same parameters Field Description name string (Optional) Name of the queue. If empty the server auto-generates a unique name for this queue durable bool (Optional) Durable keeps the queue also after the server restarts autoDelete bool (Optional) AutoDelete removes the queue when no consumers are active exclusive bool (Optional) Exclusive sets the queues to be accessible only by the connection that declares them; they will be deleted when the connection closes noWait bool (Optional) NowWait when true, the queue is assumed to already be declared on the server arguments string (Optional) Arguments of a queue (also known as \u201cx-arguments\u201d) used for optional features and plugins AWSLambdaTrigger ( Appears on: TriggerTemplate ) AWSLambdaTrigger refers to specification of the trigger to invoke an AWS Lambda function Field Description functionName string FunctionName refers to the name of the function to invoke. accessKey Kubernetes core/v1.SecretKeySelector (Optional) AccessKey refers K8s secret containing aws access key secretKey Kubernetes core/v1.SecretKeySelector (Optional) SecretKey refers K8s secret containing aws secret key region string Region is AWS region payload \[\]TriggerParameter Payload is the list of key-value extracted from an event payload to construct the request payload. 
parameters \\[\\]TriggerParameter (Optional) Parameters is the list of key-value extracted from event\u2019s payload that are applied to the trigger resource. invocationType string (Optional) Choose from the following options. RequestResponse (default) - Invoke the function synchronously. Keep the connection open until the function returns a response or times out. The API response includes the function response and additional data. Event - Invoke the function asynchronously. Send events that fail multiple times to the function\u2019s dead-letter queue (if it\u2019s configured). The API response only includes a status code. DryRun - Validate parameter values and verify that the user or role has permission to invoke the function. roleARN string (Optional) RoleARN is the Amazon Resource Name (ARN) of the role to assume. Amount ( Appears on: Backoff ) Amount represent a numeric amount. Field Description value \\[\\]byte ArgoWorkflowOperation ( string alias) ( Appears on: ArgoWorkflowTrigger ) ArgoWorkflowOperation refers to the type of the operation performed on the Argo Workflow ArgoWorkflowTrigger ( Appears on: TriggerTemplate ) ArgoWorkflowTrigger is the trigger for the Argo Workflow Field Description source ArtifactLocation Source of the K8s resource file(s) operation ArgoWorkflowOperation (Optional) Operation refers to the type of operation performed on the argo workflow resource. Default value is Submit. parameters \\[\\]TriggerParameter Parameters is the list of parameters to pass to resolved Argo Workflow object args \\[\\]string Args is the list of arguments to pass to the argo CLI ArtifactLocation ( Appears on: ArgoWorkflowTrigger , StandardK8STrigger ) ArtifactLocation describes the source location for an external artifact Field Description s3 S3Artifact S3 compliant artifact inline string Inline artifact is embedded in sensor spec as a string file FileArtifact File artifact is artifact stored in a file url URLArtifact URL to fetch the artifact from configmap Kubernetes core/v1.ConfigMapKeySelector Configmap that stores the artifact git GitArtifact Git repository hosting the artifact resource K8SResource Resource is generic template for K8s resource AuthStrategy ( string alias) ( Appears on: NATSConfig , NativeStrategy ) AuthStrategy is the auth strategy of native nats installaion AzureEventHubsTrigger ( Appears on: TriggerTemplate ) AzureEventHubsTrigger refers to specification of the Azure Event Hubs Trigger Field Description fqdn string FQDN refers to the namespace dns of Azure Event Hubs to be used i.e. .servicebus.windows.net hubName string HubName refers to the Azure Event Hub to send events to sharedAccessKeyName Kubernetes core/v1.SecretKeySelector SharedAccessKeyName refers to the name of the Shared Access Key sharedAccessKey Kubernetes core/v1.SecretKeySelector SharedAccessKey refers to a K8s secret containing the primary key for the payload \\[\\]TriggerParameter Payload is the list of key-value extracted from an event payload to construct the request payload. parameters \\[\\]TriggerParameter (Optional) Parameters is the list of key-value extracted from event\u2019s payload that are applied to the trigger resource. 
AzureEventsHubEventSource ( Appears on: EventSourceSpec ) AzureEventsHubEventSource describes the event source for azure events hub More info at https://docs.microsoft.com/en-us/azure/event-hubs/ Field Description fqdn string FQDN of the EventHubs namespace you created More info at https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string sharedAccessKeyName Kubernetes core/v1.SecretKeySelector SharedAccessKeyName is the name you chose for your application\u2019s SAS keys sharedAccessKey Kubernetes core/v1.SecretKeySelector SharedAccessKey is the generated value of the key hubName string Event Hub path/name metadata map\[string\]string (Optional) Metadata holds the user defined metadata which will be passed along with the event payload. filter EventSourceFilter (Optional) Filter AzureQueueStorageEventSource ( Appears on: EventSourceSpec ) AzureQueueStorageEventSource describes the event source for azure queue storage more info at https://learn.microsoft.com/en-us/azure/storage/queues/ Field Description storageAccountName string (Optional) StorageAccountName is the name of the storage account where the queue is. This field is necessary to access via Azure AD (managed identity) and it is ignored if ConnectionString is set. connectionString Kubernetes core/v1.SecretKeySelector (Optional) ConnectionString is the connection string to access Azure Queue Storage. If this field is not provided it will try to access via Azure AD with StorageAccountName. queueName string QueueName is the name of the queue jsonBody bool (Optional) JSONBody specifies that all event body payload coming from this source will be JSON dlq bool (Optional) DLQ specifies if a dead-letter queue is configured for messages that can\u2019t be processed successfully. If set to true, messages with invalid payload won\u2019t be acknowledged, allowing them to be forwarded to the dead-letter queue. The default value is false. metadata map\[string\]string (Optional) Metadata holds the user defined metadata which will be passed along with the event payload. filter EventSourceFilter (Optional) Filter decodeMessage bool (Optional) DecodeMessage specifies if all the messages should be base64 decoded. If set to true the decoding is done before the evaluation of JSONBody waitTimeInSeconds int32 (Optional) WaitTimeInSeconds is the duration (in seconds) for which the event source waits between empty results from the queue. The default value is 3 seconds. AzureServiceBusEventSource ( Appears on: EventSourceSpec ) AzureServiceBusEventSource describes the event source for azure service bus More info at https://docs.microsoft.com/en-us/azure/service-bus-messaging/ Field Description connectionString Kubernetes core/v1.SecretKeySelector (Optional) ConnectionString is the connection string for the Azure Service Bus. If this field is not provided it will try to access via Azure AD with DefaultAzureCredential and FullyQualifiedNamespace. queueName string QueueName is the name of the Azure Service Bus Queue topicName string TopicName is the name of the Azure Service Bus Topic subscriptionName string SubscriptionName is the name of the Azure Service Bus Topic Subscription tls TLSConfig (Optional) TLS configuration for the service bus client jsonBody bool (Optional) JSONBody specifies that all event body payload coming from this source will be JSON metadata map\[string\]string (Optional) Metadata holds the user defined metadata which will be passed along with the event payload. 
filter EventSourceFilter (Optional) Filter fullyQualifiedNamespace string (Optional) FullyQualifiedNamespace is the Service Bus namespace name (ex: myservicebus.servicebus.windows.net). This field is necessary to access via Azure AD (managed identity) and it is ignored if ConnectionString is set. AzureServiceBusTrigger ( Appears on: TriggerTemplate ) Field Description connectionString Kubernetes core/v1.SecretKeySelector ConnectionString is the connection string for the Azure Service Bus queueName string QueueName is the name of the Azure Service Bus Queue topicName string TopicName is the name of the Azure Service Bus Topic subscriptionName string SubscriptionName is the name of the Azure Service Bus Topic Subscription tls TLSConfig (Optional) TLS configuration for the service bus client payload \\[\\]TriggerParameter Payload is the list of key-value extracted from an event payload to construct the request payload. parameters \\[\\]TriggerParameter (Optional) Parameters is the list of key-value extracted from event\u2019s payload that are applied to the trigger resource. Backoff ( Appears on: AMQPEventSource , EmitterEventSource , K8SResourcePolicy , KafkaEventSource , MQTTEventSource , NATSEventsSource , NSQEventSource , PulsarEventSource , PulsarTrigger , Trigger ) Backoff for an operation Field Description duration Int64OrString (Optional) The initial duration in nanoseconds or strings like \u201c1s\u201d, \u201c3m\u201d factor Amount (Optional) Duration is multiplied by factor each iteration jitter Amount (Optional) The amount of jitter applied each iteration steps int32 (Optional) Exit with error after this many steps BasicAuth ( Appears on: AMQPEventSource , GerritEventSource , HTTPTrigger , MQTTEventSource , NATSAuth , SchemaRegistryConfig ) BasicAuth contains the reference to K8s secrets that holds the username and password Field Description username Kubernetes core/v1.SecretKeySelector Username refers to the Kubernetes secret that holds the username required for basic auth. password Kubernetes core/v1.SecretKeySelector Password refers to the Kubernetes secret that holds the password required for basic auth. BitbucketAuth ( Appears on: BitbucketEventSource ) BitbucketAuth holds the different auth strategies for connecting to Bitbucket Field Description basic BitbucketBasicAuth (Optional) Basic is BasicAuth auth strategy. oauthToken Kubernetes core/v1.SecretKeySelector (Optional) OAuthToken refers to the K8s secret that holds the OAuth Bearer token. BitbucketBasicAuth ( Appears on: BitbucketAuth ) BasicAuth holds the information required to authenticate user via basic auth mechanism Field Description username Kubernetes core/v1.SecretKeySelector Username refers to the K8s secret that holds the username. password Kubernetes core/v1.SecretKeySelector Password refers to the K8s secret that holds the password. BitbucketEventSource ( Appears on: EventSourceSpec ) BitbucketEventSource describes the event source for Bitbucket Field Description deleteHookOnFinish bool (Optional) DeleteHookOnFinish determines whether to delete the defined Bitbucket hook once the event source is stopped. metadata map\\[string\\]string (Optional) Metadata holds the user defined metadata which will be passed along the event payload. webhook WebhookContext Webhook refers to the configuration required to run an http server auth BitbucketAuth Auth information required to connect to Bitbucket. events \\[\\]string Events this webhook is subscribed to. 
BitbucketRepository (Appears on: BitbucketEventSource)

- owner (string): Owner is the owner of the repository.
- repositorySlug (string): RepositorySlug is a URL-friendly version of a repository name, automatically generated by Bitbucket for use in the URL.

BitbucketServerEventSource (Appears on: EventSourceSpec)

BitbucketServerEventSource refers to an event-source for Bitbucket Server events.

- webhook (WebhookContext): Webhook holds configuration to run a http server.
- projectKey (string, optional): DeprecatedProjectKey is the key of the project for which integration needs to be set up. Deprecated: use Repositories instead. Will be unsupported in v1.8.
- repositorySlug (string, optional): DeprecatedRepositorySlug is the slug of the repository for which integration needs to be set up. Deprecated: use Repositories instead. Will be unsupported in v1.8.
- projects ([]string, optional): Projects holds a list of projects for which integration needs to be set up; this will add the webhook to all repositories in the project.
- repositories ([]BitbucketServerRepository, optional): Repositories holds a list of repositories for which integration needs to be set up.
- events ([]string, optional): Events are the Bitbucket events to listen to. Refer to https://confluence.atlassian.com/bitbucketserver/event-payload-938025882.html
- skipBranchRefsChangedOnOpenPR (bool, optional): SkipBranchRefsChangedOnOpenPR bypasses the event repo:refs_changed for branches whenever there's an associated open pull request. This helps optimize the event handling process by avoiding unnecessary triggers for branch reference changes that are already part of a pull request under review.
- accessToken (Kubernetes core/v1.SecretKeySelector): AccessToken is a reference to a K8s secret which holds the Bitbucket API access information.
- webhookSecret (Kubernetes core/v1.SecretKeySelector): WebhookSecret is a reference to a K8s secret which holds the Bitbucket webhook secret (for HMAC validation).
- bitbucketserverBaseURL (string): BitbucketServerBaseURL is the base URL for API requests to a custom endpoint.
- deleteHookOnFinish (bool, optional): DeleteHookOnFinish determines whether to delete the Bitbucket Server hook for the project once the event source is stopped.
- metadata (map[string]string, optional): Metadata holds the user-defined metadata which will be passed along with the event payload.
- filter (EventSourceFilter, optional): Filter.
- tls (TLSConfig, optional): TLS configuration for the bitbucketserver client.
- checkInterval (string, optional): CheckInterval is a duration in which to wait before checking that the webhooks exist, e.g. 1s, 30m, 2h... (defaults to 1m).

BitbucketServerRepository (Appears on: BitbucketServerEventSource)

- projectKey (string): ProjectKey is the key of the project for which integration needs to be set up.
- repositorySlug (string): RepositorySlug is the slug of the repository for which integration needs to be set up.
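A corresponding Bitbucket Server sketch, using a project-level hook; the base URL, project key, and secret names are illustrative:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: bitbucket-server           # illustrative name
spec:
  bitbucketserver:
    example:
      bitbucketserverBaseURL: https://bitbucket.example.com/rest   # illustrative
      projects: ["MYPROJ"]         # hook is added to every repo in the project
      events: ["repo:refs_changed"]
      accessToken:
        name: bitbucketserver-access   # hypothetical secret
        key: token
      webhookSecret:
        name: bitbucketserver-access
        key: secret                # used for HMAC validation
      webhook:
        endpoint: /push
        port: "12000"
        url: https://myfqdn.example.com
```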
BusConfig (Appears on: EventBusStatus)

BusConfig has the finalized configuration for EventBus.

- nats (NATSConfig, optional)
- jetstream (JetStreamConfig, optional)
- kafka (KafkaBus, optional)

CalendarEventSource (Appears on: EventSourceSpec)

CalendarEventSource describes a time-based dependency. One of the fields (schedule, interval, or recurrence) must be passed. Schedule takes precedence over interval; interval takes precedence over recurrence.

- schedule (string, optional): Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron
- interval (string, optional): Interval is a string that describes an interval duration, e.g. 1s, 30m, 2h...
- exclusionDates ([]string): ExclusionDates defines the list of DATE-TIME exceptions for recurring events.
- timezone (string, optional): Timezone in which to run the schedule.
- metadata (map[string]string, optional): Metadata holds the user-defined metadata which will be passed along with the event payload.
- persistence (EventPersistence): Persistence holds the configuration for event persistence.
- filter (EventSourceFilter, optional): Filter.
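A minimal calendar sketch showing both styles; the names are illustrative, and only one of schedule or interval is needed per entry:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: calendar                   # illustrative name
spec:
  calendar:
    example-with-interval:
      interval: 10s                # fires every 10 seconds
    example-with-schedule:
      schedule: "30 * * * *"       # cron: minute 30 of every hour
      timezone: America/New_York
```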
CatchupConfiguration (Appears on: EventPersistence)

- enabled (bool): Enabled allows triggering the missed schedules when the event source restarts.
- maxDuration (string): MaxDuration holds the max catchup duration.

Comparator (string alias) (Appears on: DataFilter)

Comparator refers to the comparator operator for a data filter.

Condition (Appears on: Status)

Condition contains details about resource state.

- type (ConditionType): Condition type.
- status (Kubernetes core/v1.ConditionStatus): Condition status, True, False or Unknown.
- lastTransitionTime (Kubernetes meta/v1.Time, optional): Last time the condition transitioned from one status to another.
- reason (string, optional): Unique; this should be a short, machine-understandable string that gives the reason for the condition's last transition. For example, "ImageNotFound".
- message (string, optional): Human-readable message indicating details about the last transition.

ConditionType (string alias) (Appears on: Condition)

ConditionType is a valid value of Condition.Type.

ConditionsResetByTime (Appears on: ConditionsResetCriteria)

- cron (string): Cron is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron
- timezone (string, optional)

ConditionsResetCriteria (Appears on: TriggerTemplate)

- byTime (ConditionsResetByTime): Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron

ConfigMapPersistence (Appears on: EventPersistence)

- name (string): Name of the configmap.
- createIfNotExist (bool): CreateIfNotExist will create the configmap if it doesn't exist.

ContainerTemplate (Appears on: JetStreamBus, NativeStrategy)

ContainerTemplate defines a customized spec for a container.

- resources (Kubernetes core/v1.ResourceRequirements)
- imagePullPolicy (Kubernetes core/v1.PullPolicy)
- securityContext (Kubernetes core/v1.SecurityContext)

CustomTrigger (Appears on: TriggerTemplate)

CustomTrigger refers to the specification of the custom trigger.

- serverURL (string): ServerURL is the url of the gRPC server that executes the custom trigger.
- secure (bool): Secure refers to the type of connection between the sensor and the custom trigger gRPC server.
- certSecret (Kubernetes core/v1.SecretKeySelector): CertSecret refers to the secret that contains the cert for the secure connection between the sensor and the custom trigger gRPC server.
- serverNameOverride (string): ServerNameOverride for the secure connection between the sensor and the custom trigger gRPC server.
- spec (map[string]string): Spec is the custom trigger resource specification that the custom trigger gRPC server knows how to interpret.
- parameters ([]TriggerParameter): Parameters is the list of parameters that is applied to the resolved custom trigger object.
- payload ([]TriggerParameter): Payload is the list of key-value pairs extracted from an event payload to construct the request payload.

DataFilter (Appears on: EventDependencyFilter)

DataFilter describes constraints and filters for event data. Regular expressions are purposefully not a feature, as they are overkill for our uses here. See Rob Pike's post: https://commandcenter.blogspot.com/2011/08/regular-expressions-in-lexing-and.html

- path (string): Path is the JSONPath of the event's (JSON decoded) data key. Path is a series of keys separated by a dot. A key may contain wildcard characters '*' and '?'. To access an array value, use the index as the key. The dot and wildcard characters can be escaped with '\'. See https://github.com/tidwall/gjson#path-syntax for more information on how to use this.
- type (JSONType): Type contains the JSON type of the data.
- value ([]string): Value is the allowed string values for this key. Booleans are parsed using strconv.ParseBool(), numbers are parsed as float64 using strconv.ParseFloat(), strings are taken as is, and nils are ignored.
- comparator (Comparator): Comparator compares the event data with a user-given value. Can be ">=", ">", "=", "!=", "<", or "<=". Optional; if left blank, treated as equality "=".
- template (string): Template is a go-template for extracting a string from the event's data. A Template is evaluated with the provided path, type and value. The templating follows the standard go-template syntax as well as sprig's extra functions. See https://pkg.go.dev/text/template and https://masterminds.github.io/sprig/

EmailTrigger (Appears on: TriggerTemplate)

EmailTrigger refers to the specification of the email notification trigger.

- parameters ([]TriggerParameter, optional): Parameters is the list of key-value pairs extracted from the event's payload that are applied to the trigger resource.
- username (string, optional): Username refers to the username used to connect to the SMTP server.
- smtpPassword (Kubernetes core/v1.SecretKeySelector, optional): SMTPPassword refers to the Kubernetes secret that holds the SMTP password used to connect to the SMTP server.
- host (string): Host refers to the SMTP host URL to which the email is sent.
- port (int32, optional): Port refers to the SMTP server port to which the email is sent. Defaults to 0.
- to ([]string, optional): To refers to the email addresses to which the emails are sent.
- from (string, optional): From refers to the address from which the email is sent.
- subject (string, optional): Subject refers to the subject line of the email sent.
- body (string, optional): Body refers to the body/content of the email sent.
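A sketch of an EmailTrigger as a Sensor trigger template fragment; the SMTP host, addresses, and secret name are illustrative, and the surrounding Sensor spec is omitted:

```yaml
# Fragment of a Sensor "triggers" list (illustrative values throughout).
- template:
    name: email-trigger
    email:
      host: smtp.example.com       # hypothetical SMTP host
      port: 587
      username: bot@example.com
      smtpPassword:
        name: smtp-secret          # hypothetical secret
        key: password
      to: ["oncall@example.com"]
      from: bot@example.com
      subject: Build notification
      body: An event was received.
```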
EmitterEventSource (Appears on: EventSourceSpec)

EmitterEventSource describes the event source for Emitter. More info at https://emitter.io/develop/getting-started/

- broker (string): Broker URI to connect to.
- channelKey (string): ChannelKey refers to the channel key.
- channelName (string): ChannelName refers to the channel name.
- username (Kubernetes core/v1.SecretKeySelector, optional): Username to use to connect to the broker.
- password (Kubernetes core/v1.SecretKeySelector, optional): Password to use to connect to the broker.
- connectionBackoff (Backoff, optional): Backoff holds parameters applied to the connection.
- jsonBody (bool, optional): JSONBody specifies that all event body payloads coming from this source will be JSON.
- tls (TLSConfig, optional): TLS configuration for the emitter client.
- metadata (map[string]string, optional): Metadata holds the user-defined metadata which will be passed along with the event payload.
- filter (EventSourceFilter, optional): Filter.

Event

Event represents the cloudevent received from an event source.

- context (EventContext)
- data ([]byte)

EventBus

EventBus is the definition of an eventbus resource.

- metadata (Kubernetes meta/v1.ObjectMeta): Refer to the Kubernetes API documentation for the fields of the metadata field.
- spec (EventBusSpec):
  - nats (NATSBus, optional): NATS eventbus.
  - jetstream (JetStreamBus, optional)
  - kafka (KafkaBus, optional): Kafka eventbus.
  - jetstreamExotic (JetStreamConfig, optional): Exotic JetStream.
- status (EventBusStatus, optional)

EventBusSpec (Appears on: EventBus)

EventBusSpec refers to the specification of the eventbus resource.

- nats (NATSBus, optional): NATS eventbus.
- jetstream (JetStreamBus, optional)
- kafka (KafkaBus, optional): Kafka eventbus.
- jetstreamExotic (JetStreamConfig, optional): Exotic JetStream.

EventBusStatus (Appears on: EventBus)

EventBusStatus holds the status of the eventbus resource.

- Status (Status): Members of Status are embedded into this type.
- config (BusConfig): Config holds the finalized configuration of EventBus.

EventBusType (string alias)

EventBusType is the type of event bus.

EventContext (Appears on: Event, EventDependencyFilter)

EventContext holds the context of the cloudevent received from an event source.

- id (string): ID of the event; must be non-empty and unique within the scope of the producer.
- source (string): Source - a URI describing the event producer.
- specversion (string): SpecVersion - the version of the CloudEvents specification used by the event.
- type (string): Type - the type of the occurrence which has happened.
- datacontenttype (string): DataContentType - a MIME (RFC2046) string describing the media type of data.
- subject (string): Subject - the subject of the event in the context of the event producer.
- time (Kubernetes meta/v1.Time): Time - a timestamp of when the event happened.

EventDependency (Appears on: SensorSpec)

EventDependency describes a dependency.

- name (string): Name is a unique name of this dependency.
- eventSourceName (string): EventSourceName is the name of the EventSource that the Sensor depends on.
- eventName (string): EventName is the name of the event.
- filters (EventDependencyFilter): Filters and rules governing toleration of success and constraints on the context and data of an event.
- transform (EventDependencyTransformer): Transform transforms the event data.
- filtersLogicalOperator (LogicalOperator): FiltersLogicalOperator defines how different filters are evaluated together. Available values: and (&&), or (||). Optional; if left blank, treated as and (&&).
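A minimal EventDependency sketch as a Sensor spec fragment; the dependency, event source, and event names are illustrative:

```yaml
# Fragment of a Sensor spec showing one dependency with a data filter.
dependencies:
  - name: payload-dep              # illustrative name
    eventSourceName: webhook       # EventSource this Sensor depends on
    eventName: example
    filtersLogicalOperator: and    # default when omitted
    filters:
      data:
        - path: body.action        # gjson path into the event data
          type: string
          value: ["opened"]
```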
EventDependencyFilter (Appears on: EventDependency)

EventDependencyFilter defines filters and constraints for an event.

- time (TimeFilter): Time filter on the event with escalation.
- context (EventContext): Context filter constraints.
- data ([]DataFilter): Data filter constraints with escalation.
- exprs ([]ExprFilter): Exprs contains the list of expressions evaluated against the event payload.
- dataLogicalOperator (LogicalOperator): DataLogicalOperator defines how multiple Data filters (if defined) are evaluated together. Available values: and (&&), or (||). Optional; if left blank, treated as and (&&).
- exprLogicalOperator (LogicalOperator): ExprLogicalOperator defines how multiple Exprs filters (if defined) are evaluated together. Available values: and (&&), or (||). Optional; if left blank, treated as and (&&).
- script (string): Script refers to a Lua script evaluated to determine the validity of an event.

EventDependencyTransformer (Appears on: EventDependency)

EventDependencyTransformer transforms the event.

- jq (string, optional): JQ holds the jq command applied for transformation.
- script (string, optional): Script refers to a Lua script used to transform the event.

EventPersistence (Appears on: CalendarEventSource)

- catchup (CatchupConfiguration): Catchup enables triggering the missed schedules when the event source restarts.
- configMap (ConfigMapPersistence): ConfigMap holds configmap details for persistence.

EventSource

EventSource is the definition of an eventsource resource.

- metadata (Kubernetes meta/v1.ObjectMeta): Refer to the Kubernetes API documentation for the fields of the metadata field.
- spec (EventSourceSpec):
  - eventBusName (string): EventBusName references an EventBus name. By default the value is "default".
  - template (Template, optional): Template is the pod specification for the event source.
  - service (Service, optional): Service is the specification of the service to expose the event source.
  - minio (map[string]S3Artifact): Minio event sources.
  - calendar (map[string]CalendarEventSource): Calendar event sources.
  - file (map[string]FileEventSource): File event sources.
  - resource (map[string]ResourceEventSource): Resource event sources.
  - webhook (map[string]WebhookEventSource): Webhook event sources.
  - amqp (map[string]AMQPEventSource): AMQP event sources.
  - kafka (map[string]KafkaEventSource): Kafka event sources.
  - mqtt (map[string]MQTTEventSource): MQTT event sources.
  - nats (map[string]NATSEventsSource): NATS event sources.
  - sns (map[string]SNSEventSource): SNS event sources.
  - sqs (map[string]SQSEventSource): SQS event sources.
  - pubSub (map[string]PubSubEventSource): PubSub event sources.
  - github (map[string]GithubEventSource): Github event sources.
  - gitlab (map[string]GitlabEventSource): Gitlab event sources.
  - hdfs (map[string]HDFSEventSource): HDFS event sources.
  - slack (map[string]SlackEventSource): Slack event sources.
  - storageGrid (map[string]StorageGridEventSource): StorageGrid event sources.
  - azureEventsHub (map[string]AzureEventsHubEventSource): AzureEventsHub event sources.
  - stripe (map[string]StripeEventSource): Stripe event sources.
  - emitter (map[string]EmitterEventSource): Emitter event sources.
  - redis (map[string]RedisEventSource): Redis event sources.
  - nsq (map[string]NSQEventSource): NSQ event sources.
  - pulsar (map[string]PulsarEventSource): Pulsar event sources.
  - generic (map[string]GenericEventSource): Generic event sources.
  - replicas (int32): Replicas is the event source deployment replicas.
  - bitbucketserver (map[string]BitbucketServerEventSource): Bitbucket Server event sources.
  - bitbucket (map[string]BitbucketEventSource): Bitbucket event sources.
  - redisStream (map[string]RedisStreamEventSource): Redis stream event sources.
  - azureServiceBus (map[string]AzureServiceBusEventSource): Azure Service Bus event sources.
  - azureQueueStorage (map[string]AzureQueueStorageEventSource): AzureQueueStorage event sources.
  - sftp (map[string]SFTPEventSource): SFTP event sources.
  - gerrit (map[string]GerritEventSource): Gerrit event sources.
- status (EventSourceStatus, optional)

EventSourceFilter (Appears on: AMQPEventSource, AzureEventsHubEventSource, AzureQueueStorageEventSource, AzureServiceBusEventSource, BitbucketEventSource, BitbucketServerEventSource, CalendarEventSource, EmitterEventSource, FileEventSource, GenericEventSource, GerritEventSource, GithubEventSource, GitlabEventSource, HDFSEventSource, KafkaEventSource, MQTTEventSource, NATSEventsSource, NSQEventSource, PubSubEventSource, PulsarEventSource, RedisEventSource, RedisStreamEventSource, SFTPEventSource, SNSEventSource, SQSEventSource, SlackEventSource, WebhookEventSource)

- expression (string)

EventSourceSpec (Appears on: EventSource)

EventSourceSpec refers to the specification of the event-source resource.

- eventBusName (string): EventBusName references an EventBus name. By default the value is "default".
- template (Template, optional): Template is the pod specification for the event source.
- service (Service, optional): Service is the specification of the service to expose the event source.
- minio (map[string]S3Artifact): Minio event sources.
- calendar (map[string]CalendarEventSource): Calendar event sources.
- file (map[string]FileEventSource): File event sources.
- resource (map[string]ResourceEventSource): Resource event sources.
- webhook (map[string]WebhookEventSource): Webhook event sources.
- amqp (map[string]AMQPEventSource): AMQP event sources.
- kafka (map[string]KafkaEventSource): Kafka event sources.
- mqtt (map[string]MQTTEventSource): MQTT event sources.
- nats (map[string]NATSEventsSource): NATS event sources.
- sns (map[string]SNSEventSource): SNS event sources.
- sqs (map[string]SQSEventSource): SQS event sources.
- pubSub (map[string]PubSubEventSource): PubSub event sources.
- github (map[string]GithubEventSource): Github event sources.
- gitlab (map[string]GitlabEventSource): Gitlab event sources.
- hdfs (map[string]HDFSEventSource): HDFS event sources.
- slack (map[string]SlackEventSource): Slack event sources.
- storageGrid (map[string]StorageGridEventSource): StorageGrid event sources.
- azureEventsHub (map[string]AzureEventsHubEventSource): AzureEventsHub event sources.
- stripe (map[string]StripeEventSource): Stripe event sources.
- emitter (map[string]EmitterEventSource): Emitter event sources.
- redis (map[string]RedisEventSource): Redis event sources.
- nsq (map[string]NSQEventSource): NSQ event sources.
- pulsar (map[string]PulsarEventSource): Pulsar event sources.
- generic (map[string]GenericEventSource): Generic event sources.
- replicas (int32): Replicas is the event source deployment replicas.
- bitbucketserver (map[string]BitbucketServerEventSource): Bitbucket Server event sources.
- bitbucket (map[string]BitbucketEventSource): Bitbucket event sources.
- redisStream (map[string]RedisStreamEventSource): Redis stream event sources.
- azureServiceBus (map[string]AzureServiceBusEventSource): Azure Service Bus event sources.
- azureQueueStorage (map[string]AzureQueueStorageEventSource): AzureQueueStorage event sources.
- sftp (map[string]SFTPEventSource): SFTP event sources.
- gerrit (map[string]GerritEventSource): Gerrit event sources.

EventSourceStatus (Appears on: EventSource)

EventSourceStatus holds the status of the event-source resource.

- Status (Status): Members of Status are embedded into this type.

EventSourceType (string alias)

EventSourceType is the type of event source.

ExprFilter (Appears on: EventDependencyFilter)

- expr (string): Expr refers to the expression that determines the outcome of the filter.
- fields ([]PayloadField): Fields refers to the set of keys that refer to the paths within the event payload.

FileArtifact (Appears on: ArtifactLocation)

FileArtifact contains information about an artifact in a filesystem.

- path (string)

FileEventSource (Appears on: EventSourceSpec)

FileEventSource describes an event-source for file related events.

- eventType (string): Type of file operations to watch. Refer to https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go for more information.
- watchPathConfig (WatchPathConfig): WatchPathConfig contains configuration about the file path to watch.
- polling (bool): Use polling instead of inotify.
- metadata (map[string]string, optional): Metadata holds the user-defined metadata which will be passed along with the event payload.
- filter (EventSourceFilter, optional): Filter.

GenericEventSource (Appears on: EventSourceSpec)

GenericEventSource refers to a generic event source. It can be used to implement a custom event source.

- url (string): URL of the gRPC server that implements the event source.
- config (string): Config is the event source configuration.
- insecure (bool): Insecure determines the type of connection.
- jsonBody (bool, optional): JSONBody specifies that all event body payloads coming from this source will be JSON.
- metadata (map[string]string, optional): Metadata holds the user-defined metadata which will be passed along with the event payload.
- authSecret (Kubernetes core/v1.SecretKeySelector, optional): AuthSecret holds a secret selector that contains a bearer token for authentication.
- filter (EventSourceFilter, optional): Filter.

GerritEventSource (Appears on: EventSourceSpec)

GerritEventSource refers to an event-source for Gerrit events.

- webhook (WebhookContext): Webhook holds configuration to run a http server.
- hookName (string): HookName is the name of the webhook.
- events ([]string): Events are the Gerrit events to listen to. Refer to https://gerrit-review.googlesource.com/Documentation/cmd-stream-events.html#events
- auth (BasicAuth, optional): Auth hosts secret selectors for username and password.
- gerritBaseURL (string): GerritBaseURL is the base URL for API requests to a custom endpoint.
- deleteHookOnFinish (bool, optional): DeleteHookOnFinish determines whether to delete the Gerrit hook for the project once the event source is stopped.
- metadata (map[string]string, optional): Metadata holds the user-defined metadata which will be passed along with the event payload.
- projects ([]string): List of project namespace paths like "whynowy/test".
- sslVerify (bool, optional): SslVerify to enable SSL verification.
- filter (EventSourceFilter, optional): Filter.
GitArtifact (Appears on: ArtifactLocation)

GitArtifact contains information about an artifact stored in git.

- url (string): Git URL.
- cloneDirectory (string): Directory to clone the repository. We clone the complete directory because GitArtifact is not limited to any specific Git service provider; hence we don't use any specific git provider client.
- creds (GitCreds, optional): Creds contains the reference to the git username and password.
- sshKeySecret (Kubernetes core/v1.SecretKeySelector): SSHKeySecret refers to the secret that contains the SSH key.
- filePath (string): Path to the file that contains the trigger resource definition.
- branch (string, optional): Branch to use to pull the trigger resource.
- tag (string, optional): Tag to use to pull the trigger resource.
- ref (string, optional): Ref to use to pull the trigger resource. Will result in a shallow clone and fetch.
- remote (GitRemoteConfig, optional): Remote to manage the set of tracked repositories. Defaults to "origin". Refer to https://git-scm.com/docs/git-remote
- insecureIgnoreHostKey (bool, optional): Whether to ignore the host key.

GitCreds (Appears on: GitArtifact)

GitCreds contains the reference to the git username and password.

- username (Kubernetes core/v1.SecretKeySelector)
- password (Kubernetes core/v1.SecretKeySelector)

GitRemoteConfig (Appears on: GitArtifact)

GitRemoteConfig contains the configuration of a Git remote.

- name (string): Name of the remote to fetch from.
- urls ([]string): URLs of a remote repository. It must be non-empty. Fetch will always use the first URL, while push will use all of them.

GithubAppCreds (Appears on: GithubEventSource)

- privateKey (Kubernetes core/v1.SecretKeySelector): PrivateKey refers to a K8s secret containing the GitHub app private key.
- appID (int64): AppID refers to the GitHub App ID for the application you created.
- installationID (int64): InstallationID refers to the Installation ID of the GitHub app you created and installed.

GithubEventSource (Appears on: EventSourceSpec)

GithubEventSource refers to an event-source for GitHub related events.

- id (int64, optional): Id is the webhook's id. Deprecated: this is not used at all and will be removed in v1.6.
- webhook (WebhookContext): Webhook refers to the configuration required to run a http server.
- owner (string, optional): DeprecatedOwner refers to the GitHub owner name, i.e. argoproj. Deprecated: use Repositories instead. Will be unsupported in v1.6.
- repository (string, optional): DeprecatedRepository refers to the GitHub repo name, i.e. argo-events. Deprecated: use Repositories instead. Will be unsupported in v1.6.
- events ([]string): Events refer to the GitHub events to which the event source will subscribe.
- apiToken (Kubernetes core/v1.SecretKeySelector, optional): APIToken refers to a K8s secret containing the GitHub API token.
- webhookSecret (Kubernetes core/v1.SecretKeySelector, optional): WebhookSecret refers to a K8s secret containing the GitHub webhook secret. https://developer.github.com/webhooks/securing/
- insecure (bool): Insecure TLS verification.
- active (bool, optional): Active refers to the status of the webhook for event deliveries. https://developer.github.com/webhooks/creating/#active
- contentType (string): ContentType of the event delivery.
- githubBaseURL (string, optional): GitHub base URL (for GitHub Enterprise).
- githubUploadURL (string, optional): GitHub upload URL (for GitHub Enterprise).
- deleteHookOnFinish (bool, optional): DeleteHookOnFinish determines whether to delete the GitHub hook for the repository once the event source is stopped.
- metadata (map[string]string, optional): Metadata holds the user-defined metadata which will be passed along with the event payload.
- repositories ([]OwnedRepositories): Repositories holds the information of repositories, which uses the repo owner as the key and a list of repo names as the value. Not required if Organizations is set.
- organizations ([]string): Organizations holds the names of organizations (used for organization-level webhooks). Not required if Repositories is set.
- githubApp (GithubAppCreds, optional): GitHubApp holds the GitHub app credentials.
- filter (EventSourceFilter, optional): Filter.
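A minimal GitHub EventSource sketch; the owner, repository, secret names, and webhook URL are illustrative:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: github                     # illustrative name
spec:
  github:
    example:
      repositories:
        - owner: argoproj          # illustrative owner
          names: [argo-events]     # illustrative repository list
      events: ["push"]
      apiToken:
        name: github-access        # hypothetical secret
        key: token
      webhookSecret:
        name: github-access
        key: secret
      contentType: json
      active: true
      insecure: false
      webhook:
        endpoint: /push
        port: "12000"
        method: POST
        url: https://myfqdn.example.com   # must be reachable by GitHub
```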
GitlabEventSource (Appears on: EventSourceSpec)

GitlabEventSource refers to an event-source for GitLab events.

- webhook (WebhookContext): Webhook holds configuration to run a http server.
- projectID (string, optional): DeprecatedProjectID is the id of the project for which integration needs to be set up. Deprecated: use Projects instead. Will be unsupported in v1.7.
- events ([]string): Events are the GitLab events to listen to. Refer to https://github.com/xanzy/go-gitlab/blob/bf34eca5d13a9f4c3f501d8a97b8ac226d55e4d9/projects.go#L794
- accessToken (Kubernetes core/v1.SecretKeySelector): AccessToken references the K8s secret which holds the GitLab API access information.
- enableSSLVerification (bool, optional): EnableSSLVerification to enable SSL verification.
- gitlabBaseURL (string): GitlabBaseURL is the base URL for API requests to a custom endpoint.
- deleteHookOnFinish (bool, optional): DeleteHookOnFinish determines whether to delete the GitLab hook for the project once the event source is stopped.
- metadata (map[string]string, optional): Metadata holds the user-defined metadata which will be passed along with the event payload.
- projects ([]string, optional): List of project IDs or project namespace paths like "whynowy/test". Projects and groups cannot be empty at the same time.
- secretToken (Kubernetes core/v1.SecretKeySelector): SecretToken references the K8s secret which holds the secret token used by the webhook config.
- filter (EventSourceFilter, optional): Filter.
- groups ([]string, optional): List of group IDs or group names like "test". Group-level hooks are available in GitLab Premium and Ultimate.

HDFSEventSource (Appears on: EventSourceSpec)

HDFSEventSource refers to an event-source for HDFS related events.

- WatchPathConfig (WatchPathConfig): Members of WatchPathConfig are embedded into this type.
- type (string): Type of file operations to watch.
- checkInterval (string): CheckInterval is a string that describes an interval duration to check the directory state, e.g. 1s, 30m, 2h... (defaults to 1m).
- addresses ([]string)
- hdfsUser (string): HDFSUser is the user to access the HDFS file system. It is ignored if either ccache or keytab is used.
- krbCCacheSecret (Kubernetes core/v1.SecretKeySelector): KrbCCacheSecret is the secret selector for the Kerberos ccache. Either ccache or keytab can be set to use Kerberos.
- krbKeytabSecret (Kubernetes core/v1.SecretKeySelector): KrbKeytabSecret is the secret selector for the Kerberos keytab. Either ccache or keytab can be set to use Kerberos.
- krbUsername (string): KrbUsername is the Kerberos username used with the Kerberos keytab. It must be set if keytab is used.
- krbRealm (string): KrbRealm is the Kerberos realm used with the Kerberos keytab. It must be set if keytab is used.
- krbConfigConfigMap (Kubernetes core/v1.ConfigMapKeySelector): KrbConfig is the configmap selector for the Kerberos config as a string. It must be set if either ccache or keytab is used.
- krbServicePrincipalName (string): KrbServicePrincipalName is the principal name of the Kerberos service. It must be set if either ccache or keytab is used.
- metadata (map[string]string, optional): Metadata holds the user-defined metadata which will be passed along with the event payload.
- filter (EventSourceFilter, optional): Filter.

HTTPTrigger (Appears on: TriggerTemplate)

HTTPTrigger is the trigger for the HTTP request.

- url (string): URL refers to the URL to send the HTTP request to.
- payload ([]TriggerParameter)
- tls (TLSConfig, optional): TLS configuration for the HTTP client.
- method (string, optional): Method refers to the type of the HTTP request. Refer to https://golang.org/src/net/http/method.go for more info. Default value is POST.
- parameters ([]TriggerParameter): Parameters is the list of key-value pairs extracted from the event's payload that are applied to the HTTP trigger resource.
- timeout (int64, optional): Timeout refers to the HTTP request timeout in seconds. Default value is 60 seconds.
- basicAuth (BasicAuth, optional): BasicAuth configuration for the HTTP request.
- headers (map[string]string, optional): Headers for the HTTP request.
- secureHeaders ([]*SecureHeader, optional): Secure headers stored in Kubernetes secrets for the HTTP requests.
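A sketch of an HTTPTrigger as a Sensor trigger template fragment; the target URL and dependency name are illustrative:

```yaml
# Fragment of a Sensor "triggers" list (illustrative values throughout).
- template:
    name: http-trigger
    http:
      url: https://internal.example.com/hook   # hypothetical endpoint
      method: POST                 # defaults to POST if omitted
      timeout: 30                  # seconds; defaults to 60
      payload:
        - src:
            dependencyName: payload-dep   # a dependency defined elsewhere
            dataKey: body
          dest: message            # key in the constructed request payload
```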
Int64OrString (Appears on: Backoff)

- type (Type)
- int64Val (int64)
- strVal (string)

JSONType (string alias) (Appears on: DataFilter)

JSONType contains the supported JSON types for data filtering.

JetStreamBus (Appears on: EventBusSpec)

JetStreamBus holds the JetStream EventBus information.

- version (string): JetStream version, such as "2.7.3".
- replicas (int32): JetStream StatefulSet size.
- containerTemplate (ContainerTemplate, optional): ContainerTemplate contains a customized spec for the NATS JetStream container.
- reloaderContainerTemplate (ContainerTemplate, optional): ReloaderContainerTemplate contains a customized spec for the config reloader container.
- metricsContainerTemplate (ContainerTemplate, optional): MetricsContainerTemplate contains a customized spec for the metrics container.
- persistence (PersistenceStrategy, optional)
- metadata (Metadata): Metadata sets the pods' metadata, i.e. annotations and labels.
- nodeSelector (map[string]string, optional): NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
- tolerations ([]Kubernetes core/v1.Toleration, optional): If specified, the pod's tolerations.
- securityContext (Kubernetes core/v1.PodSecurityContext, optional): SecurityContext holds pod-level security attributes and common container settings. Optional: defaults to empty. See type description for default values of each field.
- imagePullSecrets ([]Kubernetes core/v1.LocalObjectReference, optional): ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
- priorityClassName (string, optional): If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities, with the former being the highest. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be the default, or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
- priority (int32, optional): The priority value. Various system components use this field to find the priority of the pod. When the Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
- affinity (Kubernetes core/v1.Affinity, optional): The pod's scheduling constraints. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
- serviceAccountName (string, optional): ServiceAccountName to apply to the StatefulSet.
- settings (string, optional): JetStream configuration; if not specified, global settings in controller-config will be used. See https://docs.nats.io/running-a-nats-service/configuration#jetstream. Only configure "max_memory_store" or "max_file_store"; do not set "store_dir" as it has been hardcoded.
- startArgs ([]string, optional): Optional arguments to start nats-server. For example, "-D" to enable debugging output, "-DV" to enable debugging and tracing. Check https://docs.nats.io/ for all the available arguments.
- streamConfig (string, optional): Optional configuration for the streams to be created in this JetStream service; if specified, it will be merged with the default configuration in controller-config. It accepts a YAML format configuration; available fields include "maxBytes", "maxMsgs", "maxAge" (e.g. 72h), "replicas" (1, 3, 5), "duplicates" (e.g. 5m), "retention" (e.g. 0: Limits (default), 1: Interest, 2: WorkQueue), "discard" (e.g. 0: DiscardOld (default), 1: DiscardNew).
- maxPayload (string, optional): Maximum number of bytes in a message payload; 0 means unlimited. Defaults to 1MB.

JetStreamConfig (Appears on: BusConfig, EventBusSpec)

- url (string): JetStream (NATS) URL.
- accessSecret (Kubernetes core/v1.SecretKeySelector, optional): Secret for auth.
- streamConfig (string, optional)
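A minimal JetStream EventBus sketch; the version and StorageClass are illustrative, and persistence is optional:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventBus
metadata:
  name: default
spec:
  jetstream:
    version: "2.10.10"             # illustrative JetStream version
    replicas: 3                    # StatefulSet size
    persistence:
      storageClassName: standard   # illustrative StorageClass
      accessMode: ReadWriteOnce
      volumeSize: 10Gi
```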
K8SResource (Appears on: ArtifactLocation)

K8SResource represents arbitrary structured data.

- value ([]byte)

K8SResourcePolicy (Appears on: TriggerPolicy)

K8SResourcePolicy refers to the policy used to check the state of K8s-based triggers using labels.

- labels (map[string]string): Labels required to identify whether a resource is in the success state.
- backoff (Backoff): Backoff before checking the resource state.
- errorOnBackoffTimeout (bool): ErrorOnBackoffTimeout determines whether the sensor should transition to the error state if the trigger policy is unable to determine the state of the resource.

KafkaBus (Appears on: BusConfig, EventBusSpec)

KafkaBus holds the Kafka EventBus information.

- url (string): URL to the Kafka cluster; multiple URLs are separated by commas.
- topic (string, optional): Topic name, defaults to {namespace_name}-{eventbus_name}.
- version (string, optional): Kafka version; sarama defaults to the oldest supported stable version.
- tls (TLSConfig, optional): TLS configuration for the Kafka client.
- sasl (SASLConfig, optional): SASL configuration for the Kafka client.
- consumerGroup (KafkaConsumerGroup, optional): Consumer group for the Kafka client.

KafkaConsumerGroup (Appears on: KafkaBus, KafkaEventSource)

- groupName (string): The name of the consumer group to use.
- oldest (bool, optional): When starting up a new group, whether to start from the oldest event (true) or the newest event (false). Defaults to false.
- rebalanceStrategy (string, optional): Rebalance strategy can be one of: sticky, roundrobin, range. Range is the default.

KafkaEventSource (Appears on: EventSourceSpec)

KafkaEventSource refers to an event-source for Kafka related events.

- url (string): URL to the Kafka cluster; multiple URLs are separated by commas.
- partition (string, optional): Partition name.
- topic (string): Topic name.
- connectionBackoff (Backoff): Backoff holds parameters applied to the connection.
- tls (TLSConfig, optional): TLS configuration for the Kafka client.
- jsonBody (bool, optional): JSONBody specifies that all event body payloads coming from this source will be JSON.
- metadata (map[string]string, optional): Metadata holds the user-defined metadata which will be passed along with the event payload.
- consumerGroup (KafkaConsumerGroup, optional): Consumer group for the Kafka client.
- limitEventsPerSecond (int64, optional): Sets a limit on how many events get read from Kafka per second.
- version (string, optional): Specify which Kafka version is being connected to; this enables certain features in sarama. Defaults to 1.0.0.
- sasl (SASLConfig, optional): SASL configuration for the Kafka client.
- filter (EventSourceFilter, optional): Filter.
- config (string, optional): YAML-format sarama config for the Kafka connection. It follows the struct of sarama.Config. See https://github.com/IBM/sarama/blob/main/config.go, e.g. consumer: fetch: min: 1 net: MaxOpenRequests: 5

KafkaTrigger (Appears on: TriggerTemplate)

KafkaTrigger refers to the specification of the Kafka trigger.

- url (string): URL of the Kafka broker; multiple URLs are separated by commas.
- topic (string): Name of the topic. More info at https://kafka.apache.org/documentation/#intro_topics
- partition (int32, optional): DEPRECATED.
- parameters ([]TriggerParameter): Parameters is the list of parameters that is applied to the resolved Kafka trigger object.
- requiredAcks (int32): RequiredAcks is used by the producer to tell the broker how many replica acknowledgements are required. Defaults to 1 (only wait for the leader to ack).
- compress (bool, optional): Compress determines whether to compress the message or not. Defaults to false. If set to true, compresses the message using snappy compression.
- flushFrequency (int32, optional): FlushFrequency refers to the frequency in milliseconds at which to flush batches. Defaults to 500 milliseconds.
- tls (TLSConfig, optional): TLS configuration for the Kafka producer.
- payload ([]TriggerParameter): Payload is the list of key-value pairs extracted from an event payload to construct the request payload.
- partitioningKey (string): The partitioning key for the messages put on the Kafka topic.
- version (string, optional): Specify which Kafka version is being connected to; this enables certain features in sarama. Defaults to 1.0.0.
- sasl (SASLConfig, optional): SASL configuration for the Kafka client.
- schemaRegistry (SchemaRegistryConfig, optional): Schema Registry configuration to produce messages in Avro format.
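A sketch of a KafkaTrigger as a Sensor trigger template fragment; the broker address, topic, and dependency name are illustrative:

```yaml
# Fragment of a Sensor "triggers" list producing to Kafka (illustrative values).
- template:
    name: kafka-trigger
    kafka:
      url: kafka.argo-events.svc:9092   # hypothetical broker address
      topic: events
      partitioningKey: event-id
      requiredAcks: 1              # wait for the leader to ack
      payload:
        - src:
            dependencyName: payload-dep   # a dependency defined elsewhere
            dataKey: body
          dest: message
```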
KubernetesResourceOperation (string alias) (Appears on: StandardK8STrigger)

KubernetesResourceOperation refers to the type of operation performed on the K8s resource.

LogTrigger (Appears on: TriggerTemplate)

- intervalSeconds (uint64, optional): Only print messages every interval. Useful to prevent logging too much data for busy events.

LogicalOperator (string alias) (Appears on: EventDependency, EventDependencyFilter)

MQTTEventSource (Appears on: EventSourceSpec)

MQTTEventSource refers to an event-source for MQTT related events.

- url (string): URL to connect to the broker.
- topic (string): Topic name.
- clientId (string): ClientID is the id of the client.
- connectionBackoff (Backoff): ConnectionBackoff holds the backoff applied to the connection.
- jsonBody (bool, optional): JSONBody specifies that all event body payloads coming from this source will be JSON.
- tls (TLSConfig, optional): TLS configuration for the mqtt client.
- metadata (map[string]string, optional): Metadata holds the user-defined metadata which will be passed along with the event payload.
- filter (EventSourceFilter, optional): Filter.
- auth (BasicAuth, optional): Auth hosts secret selectors for username and password.

Metadata (Appears on: JetStreamBus, NativeStrategy, Template)

Metadata holds the annotations and labels of an event source pod.

- annotations (map[string]string)
- labels (map[string]string)

NATSAuth (Appears on: NATSEventsSource)

NATSAuth refers to the auth info for the NATS EventSource.

- basic (BasicAuth, optional): Basic auth with username and password.
- token (Kubernetes core/v1.SecretKeySelector, optional): Token used to connect.
- nkey (Kubernetes core/v1.SecretKeySelector, optional): NKey used to connect.
- credential (Kubernetes core/v1.SecretKeySelector, optional): Credential used to connect.

NATSBus (Appears on: EventBusSpec)

NATSBus holds the NATS eventbus information.

- native (NativeStrategy): Native means to bring up a native NATS service.
- exotic (NATSConfig): Exotic holds an exotic NATS config.

NATSConfig (Appears on: BusConfig, NATSBus)

NATSConfig holds the config of NATS.

- url (string): NATS streaming URL.
- clusterID (string): Cluster ID for NATS streaming.
- auth (AuthStrategy, optional): Auth strategy, defaults to AuthStrategyNone.
- accessSecret (Kubernetes core/v1.SecretKeySelector, optional): Secret for auth.

NATSEventsSource (Appears on: EventSourceSpec)

NATSEventsSource refers to an event-source for NATS related events.

- url (string): URL to connect to the NATS cluster.
- subject (string): Subject holds the name of the subject onto which messages are published.
- connectionBackoff (Backoff): ConnectionBackoff holds the backoff applied to the connection.
- jsonBody (bool, optional): JSONBody specifies that all event body payloads coming from this source will be JSON.
- tls (TLSConfig, optional): TLS configuration for the nats client.
- metadata (map[string]string, optional): Metadata holds the user-defined metadata which will be passed along with the event payload.
- auth (NATSAuth, optional): Auth information.
- filter (EventSourceFilter, optional): Filter.
- queue (string, optional): Queue is the name of the queue group to subscribe as, if specified. Uses QueueSubscribe logic to subscribe as a queue group. If the queue is empty, the default Subscribe logic is used.
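A minimal NATS EventSource sketch; the cluster URL and subject are illustrative:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: nats                       # illustrative name
spec:
  nats:
    example:
      url: nats://nats.example.com:4222   # hypothetical cluster URL
      subject: foo                 # subject to listen on
      jsonBody: true
```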
NATSTrigger (Appears on: TriggerTemplate)

NATSTrigger refers to the specification of the NATS trigger.

- url (string): URL of the NATS cluster.
- subject (string): Name of the subject to put the message on.
- payload ([]TriggerParameter)
- parameters ([]TriggerParameter)
- tls (TLSConfig, optional): TLS configuration for the NATS producer.

NSQEventSource (Appears on: EventSourceSpec)

NSQEventSource describes the event source for NSQ PubSub. More info at https://godoc.org/github.com/nsqio/go-nsq

- hostAddress (string): HostAddress is the address of the host for NSQ lookup.
- topic (string): Topic to subscribe to.
- channel (string): Channel used for subscription.
- connectionBackoff (Backoff, optional): Backoff holds parameters applied to the connection.
- jsonBody (bool, optional): JSONBody specifies that all event body payloads coming from this source will be JSON.
- tls (TLSConfig, optional): TLS configuration for the nsq client.
- metadata (map[string]string, optional): Metadata holds the user-defined metadata which will be passed along with the event payload.
- filter (EventSourceFilter, optional): Filter.

NativeStrategy (Appears on: NATSBus)

NativeStrategy indicates to install a native NATS service.

- replicas (int32): Size is the NATS StatefulSet size.
- auth (AuthStrategy)
- persistence (PersistenceStrategy, optional)
- containerTemplate (ContainerTemplate, optional): ContainerTemplate contains a customized spec for the NATS container.
- metricsContainerTemplate (ContainerTemplate, optional): MetricsContainerTemplate contains a customized spec for the metrics container.
- nodeSelector (map[string]string, optional): NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
- tolerations ([]Kubernetes core/v1.Toleration, optional): If specified, the pod's tolerations.
- metadata (Metadata): Metadata sets the pods' metadata, i.e. annotations and labels.
- securityContext (Kubernetes core/v1.PodSecurityContext, optional): SecurityContext holds pod-level security attributes and common container settings. Optional: defaults to empty. See type description for default values of each field.
- maxAge (string, optional): Max age of existing messages, e.g. "72h", "4h35m".
- imagePullSecrets ([]Kubernetes core/v1.LocalObjectReference, optional): ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
- serviceAccountName (string, optional): ServiceAccountName to apply to the NATS StatefulSet.
- priorityClassName (string, optional): If specified, indicates the EventSource pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities, with the former being the highest. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be the default, or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
- priority (int32, optional): The priority value. Various system components use this field to find the priority of the EventSource pod. When the Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
- affinity (Kubernetes core/v1.Affinity, optional): The pod's scheduling constraints. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
- maxMsgs (uint64): Maximum number of messages per channel; 0 means unlimited. Defaults to 1000000.
- maxBytes (string): Total size of messages per channel; 0 means unlimited. Defaults to 1GB.
- maxSubs (uint64): Maximum number of subscriptions per channel; 0 means unlimited. Defaults to 1000.
- maxPayload (string): Maximum number of bytes in a message payload; 0 means unlimited. Defaults to 1MB.
- raftHeartbeatTimeout (string): Specifies the time in follower state without a leader before attempting an election, e.g. "72h", "4h35m". Defaults to 2s.
- raftElectionTimeout (string): Specifies the time in candidate state without a leader before attempting an election, e.g. "72h", "4h35m". Defaults to 2s.
- raftLeaseTimeout (string): Specifies how long a leader waits without being able to contact a quorum of nodes before stepping down as leader, e.g. "72h", "4h35m". Defaults to 1s.
- raftCommitTimeout (string): Specifies the time without an Apply() operation before sending a heartbeat to ensure a timely commit, e.g. "72h", "4h35m". Defaults to 100ms.
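A minimal native NATS EventBus sketch using the NativeStrategy fields above; values are illustrative:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventBus
metadata:
  name: default
spec:
  nats:
    native:
      replicas: 3                  # StatefulSet size; use an odd number for HA
      auth: token                  # AuthStrategy
      maxAge: 72h                  # retention for existing messages
```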
OpenWhiskTrigger (Appears on: TriggerTemplate)

OpenWhiskTrigger refers to the specification of the OpenWhisk trigger.

- host (string): Host URL of the OpenWhisk.
- version (string, optional): Version for the API. Defaults to v1.
- namespace (string): Namespace for the action. Defaults to "_".
- authToken (Kubernetes core/v1.SecretKeySelector, optional): AuthToken for authentication.
- actionName (string): Name of the action/function.
- payload ([]TriggerParameter): Payload is the list of key-value pairs extracted from an event payload to construct the request payload.
- parameters ([]TriggerParameter, optional): Parameters is the list of key-value pairs extracted from the event's payload that are applied to the trigger resource.

OwnedRepositories (Appears on: GithubEventSource)

- owner (string): Organization or user name.
- names ([]string): Repository names.

PayloadField (Appears on: ExprFilter)

PayloadField binds a value at a path within the event payload to a name.

- path (string): Path is the JSONPath of the event's (JSON decoded) data key. Path is a series of keys separated by a dot. A key may contain wildcard characters '*' and '?'. To access an array value, use the index as the key. The dot and wildcard characters can be escaped with '\'. See https://github.com/tidwall/gjson#path-syntax for more information on how to use this.
- name (string): Name acts as the key that holds the value at the path.
PersistenceStrategy (Appears on: JetStreamBus, NativeStrategy)

PersistenceStrategy defines the strategy of persistence.

- storageClassName (string, optional): Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
- accessMode (Kubernetes core/v1.PersistentVolumeAccessMode, optional): Available access modes such as ReadWriteOnce, ReadWriteMany. https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes
- volumeSize (k8s.io/apimachinery/pkg/api/resource.Quantity): Volume size, e.g. 10Gi.

PubSubEventSource (Appears on: EventSourceSpec)

PubSubEventSource refers to an event-source for GCP PubSub related events.

- projectID (string, optional): ProjectID is the GCP project ID for the subscription. Required if you run Argo Events outside of GKE/GCE (otherwise, the default value is its project).
- topicProjectID (string, optional): TopicProjectID is the GCP project ID for the topic. By default, it is the same as ProjectID.
- topic (string, optional): Topic to which the subscription should belong. Required if you want the event source to create a new subscription. If you specify this field along with an existing subscription, it will be verified whether it actually belongs to the specified topic.
- subscriptionID (string, optional): SubscriptionID is the ID of the subscription. Required if you use an existing subscription. The default value is an auto-generated hash based on this event source's settings, so the subscription might be recreated every time you update the settings, which has a possibility of event loss.
- credentialSecret (Kubernetes core/v1.SecretKeySelector, optional): CredentialSecret references the secret that contains the JSON credentials to access GCP. If it is missing, Workload Identity is implicitly used for access. https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity
- deleteSubscriptionOnFinish (bool, optional): DeleteSubscriptionOnFinish determines whether to delete the GCP PubSub subscription once the event source is stopped.
- jsonBody (bool, optional): JSONBody specifies that all event body payloads coming from this source will be JSON.
- metadata (map[string]string, optional): Metadata holds the user-defined metadata which will be passed along with the event payload.
- filter (EventSourceFilter, optional): Filter.

PulsarEventSource (Appears on: EventSourceSpec)

PulsarEventSource describes the event source for Apache Pulsar.

- topics ([]string): Name of the topics to subscribe to.
- type (string, optional): Type of the subscription. Only "exclusive" and "shared" are supported. Defaults to exclusive.
- url (string): Configure the service URL for the Pulsar service.
- tlsTrustCertsSecret (Kubernetes core/v1.SecretKeySelector, optional): Trusted TLS certificate secret.
- tlsAllowInsecureConnection (bool, optional): Whether the Pulsar client accepts untrusted TLS certificates from the broker.
- tlsValidateHostname (bool, optional): Whether the Pulsar client verifies the validity of the host name from the broker.
- tls (TLSConfig, optional): TLS configuration for the pulsar client.
- connectionBackoff (Backoff, optional): Backoff holds parameters applied to the connection.
- jsonBody (bool, optional): JSONBody specifies that all event body payloads coming from this source will be JSON.
- metadata (map[string]string, optional): Metadata holds the user-defined metadata which will be passed along with the event payload.
- authTokenSecret (Kubernetes core/v1.SecretKeySelector, optional): Authentication token for the pulsar client. Either token or athenz can be set to use auth.
- filter (EventSourceFilter, optional): Filter.
- authAthenzParams (map[string]string, optional): Authentication athenz parameters for the pulsar client. Refer to https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go. Either token or athenz can be set to use auth.
- authAthenzSecret (Kubernetes core/v1.SecretKeySelector, optional): Authentication athenz privateKey secret for the pulsar client. AuthAthenzSecret must be set if AuthAthenzParams is used.
jsonBody bool (Optional) JSONBody specifies that all event body payload coming from this source will be JSON metadata map\\[string\\]string (Optional) Metadata holds the user defined metadata which will be passed along with the event payload. authTokenSecret Kubernetes core/v1.SecretKeySelector (Optional) Authentication token for the pulsar client. Either token or athenz can be set to use auth. filter EventSourceFilter (Optional) Filter authAthenzParams map\\[string\\]string (Optional) Authentication athenz parameters for the pulsar client. Refer https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go Either token or athenz can be set to use auth. authAthenzSecret Kubernetes core/v1.SecretKeySelector (Optional) Authentication athenz privateKey secret for the pulsar client. AuthAthenzSecret must be set if AuthAthenzParams is used. PulsarTrigger ( Appears on: TriggerTemplate ) PulsarTrigger refers to the specification of the Pulsar trigger. Field Description url string Configure the service URL for the Pulsar service. topic string Name of the topic. See https://pulsar.apache.org/docs/en/concepts-messaging/ parameters \\[\\]TriggerParameter Parameters is the list of parameters that is applied to the resolved Pulsar trigger object. payload \\[\\]TriggerParameter Payload is the list of key-value pairs extracted from an event payload to construct the request payload. tlsTrustCertsSecret Kubernetes core/v1.SecretKeySelector (Optional) Trusted TLS certificate secret. tlsAllowInsecureConnection bool (Optional) Whether the Pulsar client accepts untrusted TLS certificates from the broker. tlsValidateHostname bool (Optional) Whether the Pulsar client verifies the validity of the host name from the broker. tls TLSConfig (Optional) TLS configuration for the pulsar client. authTokenSecret Kubernetes core/v1.SecretKeySelector (Optional) Authentication token for the pulsar client. Either token or athenz can be set to use auth. connectionBackoff Backoff (Optional) Backoff holds parameters applied to connection. authAthenzParams map\\[string\\]string (Optional) Authentication athenz parameters for the pulsar client. Refer https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go Either token or athenz can be set to use auth. authAthenzSecret Kubernetes core/v1.SecretKeySelector (Optional) Authentication athenz privateKey secret for the pulsar client. AuthAthenzSecret must be set if AuthAthenzParams is used. RateLimit ( Appears on: Trigger ) Field Description unit RateLimiteUnit Defaults to Second requestsPerUnit int32 RateLimiteUnit ( string alias) ( Appears on: RateLimit ) RedisEventSource ( Appears on: EventSourceSpec ) RedisEventSource describes an event source for the Redis PubSub. More info at https://godoc.org/github.com/go-redis/redis#example-PubSub Field Description hostAddress string HostAddress refers to the address of the Redis host/server password Kubernetes core/v1.SecretKeySelector (Optional) Password required for authentication if any. namespace string (Optional) Namespace to use to retrieve the password from. It should only be specified if password is declared db int32 (Optional) DB to use. If not specified, default DB 0 will be used. channels \\[\\]string tls TLSConfig (Optional) TLS configuration for the redis client. metadata map\\[string\\]string (Optional) Metadata holds the user defined metadata which will be passed along with the event payload.
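To make the RateLimit fields above concrete, here is a hedged sketch limiting a trigger to 20 executions per minute (the trigger name is illustrative):

```yaml
triggers:
  - rateLimit:
      unit: Minute          # one of Second, Minute, Hour
      requestsPerUnit: 20
    template:
      name: rate-limited-trigger   # assumed trigger name
      log: {}
```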
filter EventSourceFilter (Optional) Filter jsonBody bool (Optional) JSONBody specifies that all event body payload coming from this source will be JSON username string (Optional) Username required for ACL style authentication if any. RedisStreamEventSource ( Appears on: EventSourceSpec ) RedisStreamEventSource describes an event source for Redis streams ( https://redis.io/topics/streams-intro ) Field Description hostAddress string HostAddress refers to the address of the Redis host/server (master instance) password Kubernetes core/v1.SecretKeySelector (Optional) Password required for authentication if any. db int32 (Optional) DB to use. If not specified, default DB 0 will be used. streams \\[\\]string Streams to look for entries. XREADGROUP is used on all streams using a single consumer group. maxMsgCountPerRead int32 (Optional) MaxMsgCountPerRead holds the maximum number of messages per stream that will be read in each XREADGROUP of all streams. Example: if there are 2 streams and MaxMsgCountPerRead=10, then each XREADGROUP may read up to a total of 20 messages. Same as the COUNT option in XREADGROUP ( https://redis.io/topics/streams-intro ). Defaults to 10 consumerGroup string (Optional) ConsumerGroup refers to the Redis stream consumer group that will be created on all redis streams. Messages are read through this group. Defaults to \u2018argo-events-cg\u2019 tls TLSConfig (Optional) TLS configuration for the redis client. metadata map\\[string\\]string (Optional) Metadata holds the user defined metadata which will be passed along with the event payload. filter EventSourceFilter (Optional) Filter username string (Optional) Username required for ACL style authentication if any. ResourceEventSource ( Appears on: EventSourceSpec ) ResourceEventSource refers to an event-source for K8s resource related events. Field Description namespace string Namespace where the resource is deployed filter ResourceFilter (Optional) Filter is applied on the metadata of the resource. If you apply a filter, then the internal event informer will only monitor objects that pass the filter. GroupVersionResource Kubernetes meta/v1.GroupVersionResource (Members of GroupVersionResource are embedded into this type.) Group of the resource eventTypes \\[\\]ResourceEventType EventTypes is the list of event types to watch. Possible values are - ADD, UPDATE and DELETE. metadata map\\[string\\]string (Optional) Metadata holds the user defined metadata which will be passed along with the event payload. ResourceEventType ( string alias) ( Appears on: ResourceEventSource ) ResourceEventType is the type of event for the K8s resource mutation ResourceFilter ( Appears on: ResourceEventSource ) ResourceFilter contains K8s ObjectMeta information to further filter resource event objects Field Description prefix string (Optional) Prefix filter is applied on the resource name. labels \\[\\]Selector (Optional) Labels provide listing options to K8s API to watch resource/s. Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/label-selectors/ for more info. Unlike K8s field selector, multiple values are passed as comma separated values instead of a list of values. Eg: value: value1,value2.
Same as K8s label selector, operator \u201c=\u201d, \u201c==\u201d, \u201c!=\u201d, \u201cexists\u201d, \u201c!\u201d, \u201cnotin\u201d, \u201cin\u201d, \u201cgt\u201d and \u201clt\u201d are supported fields \\[\\]Selector (Optional) Fields provide field filters similar to K8s field selector (see https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/ ). Unlike K8s field selector, it supports arbitrary fields like \u201cspec.serviceAccountName\u201d, and the value could be a string or a regex. Same as K8s field selector, operator \u201c=\u201d, \u201c==\u201d and \u201c!=\u201d are supported. createdBy Kubernetes meta/v1.Time (Optional) If the resource is created before the specified time then the event is treated as valid. afterStart bool (Optional) If the resource is created after the start time then the event is treated as valid. S3Artifact ( Appears on: ArtifactLocation , EventSourceSpec ) S3Artifact contains information about an S3 connection and bucket Field Description endpoint string bucket S3Bucket region string insecure bool accessKey Kubernetes core/v1.SecretKeySelector secretKey Kubernetes core/v1.SecretKeySelector events \\[\\]string filter S3Filter metadata map\\[string\\]string caCertificate Kubernetes core/v1.SecretKeySelector S3Bucket ( Appears on: S3Artifact ) S3Bucket contains information to describe an S3 Bucket Field Description key string name string S3Filter ( Appears on: S3Artifact ) S3Filter represents filters to apply to bucket notifications for specifying constraints on objects Field Description prefix string suffix string SASLConfig ( Appears on: KafkaBus , KafkaEventSource , KafkaTrigger ) SASLConfig refers to SASL configuration for a client Field Description mechanism string (Optional) SASLMechanism is the name of the enabled SASL mechanism. Possible values: OAUTHBEARER, PLAIN (defaults to PLAIN). userSecret Kubernetes core/v1.SecretKeySelector User is the authentication identity (authcid) to present for SASL/PLAIN or SASL/SCRAM authentication passwordSecret Kubernetes core/v1.SecretKeySelector Password for SASL/PLAIN authentication SFTPEventSource ( Appears on: EventSourceSpec ) SFTPEventSource describes an event-source for sftp related events. Field Description eventType string Type of file operations to watch. Refer https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go for more information watchPathConfig WatchPathConfig WatchPathConfig contains configuration about the file path to watch username Kubernetes core/v1.SecretKeySelector Username required for authentication if any. password Kubernetes core/v1.SecretKeySelector Password required for authentication if any. sshKeySecret Kubernetes core/v1.SecretKeySelector SSHKeySecret refers to the secret that contains the SSH key. The key needs to contain the private key and public key. address Kubernetes core/v1.SecretKeySelector Address is the sftp address. metadata map\\[string\\]string (Optional) Metadata holds the user defined metadata which will be passed along with the event payload.
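Tying the ResourceEventSource, ResourceFilter, and Selector fields above together, a sketch watching Deployments might look like the following (the event name, prefix, and label values are illustrative assumptions):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: resource-demo
spec:
  resource:
    example:                        # assumed event name
      namespace: argo-events
      group: apps
      version: v1
      resource: deployments
      eventTypes:
        - ADD
        - UPDATE
      filter:
        prefix: my-app              # assumed resource-name prefix
        labels:
          - key: environment
            operation: "=="
            value: production
```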
filter EventSourceFilter (Optional) Filter pollIntervalDuration string (Optional) PollIntervalDuration is the interval at which to poll the SFTP server. Defaults to 10 seconds SNSEventSource ( Appears on: EventSourceSpec ) SNSEventSource refers to an event-source for AWS SNS related events Field Description webhook WebhookContext Webhook configuration for http server topicArn string TopicArn accessKey Kubernetes core/v1.SecretKeySelector AccessKey refers to a K8s secret containing the aws access key secretKey Kubernetes core/v1.SecretKeySelector SecretKey refers to a K8s secret containing the aws secret key region string Region is the AWS region roleARN string (Optional) RoleARN is the Amazon Resource Name (ARN) of the role to assume. metadata map\\[string\\]string (Optional) Metadata holds the user defined metadata which will be passed along with the event payload. validateSignature bool (Optional) ValidateSignature is a boolean that can be set to true for SNS signature verification filter EventSourceFilter (Optional) Filter endpoint string (Optional) Endpoint configures connection to a specific SNS endpoint instead of Amazon\u2019s servers SQSEventSource ( Appears on: EventSourceSpec ) SQSEventSource refers to an event-source for AWS SQS related events Field Description accessKey Kubernetes core/v1.SecretKeySelector AccessKey refers to a K8s secret containing the aws access key secretKey Kubernetes core/v1.SecretKeySelector SecretKey refers to a K8s secret containing the aws secret key region string Region is the AWS region queue string Queue is the AWS SQS queue to listen to for messages waitTimeSeconds int64 WaitTimeSeconds is the duration (in seconds) for which the call waits for a message to arrive in the queue before returning. roleARN string (Optional) RoleARN is the Amazon Resource Name (ARN) of the role to assume. jsonBody bool (Optional) JSONBody specifies that all event body payload coming from this source will be JSON queueAccountId string (Optional) QueueAccountID is the ID of the account that created the queue to monitor metadata map\\[string\\]string (Optional) Metadata holds the user defined metadata which will be passed along with the event payload. dlq bool (Optional) DLQ specifies if a dead-letter queue is configured for messages that can\u2019t be processed successfully. If set to true, messages with invalid payload won\u2019t be acknowledged, allowing them to be forwarded to the dead-letter queue. The default value is false. filter EventSourceFilter (Optional) Filter endpoint string (Optional) Endpoint configures connection to a specific SQS endpoint instead of Amazon\u2019s servers sessionToken Kubernetes core/v1.SecretKeySelector (Optional) SessionToken refers to a K8s secret containing the AWS temporary credentials (STS) session token SchemaRegistryConfig ( Appears on: KafkaTrigger ) SchemaRegistryConfig refers to configuration for a client Field Description url string Schema Registry URL. schemaId int32 Schema ID auth BasicAuth (Optional) SchemaRegistry - basic authentication SecureHeader SecureHeader refers to HTTP Headers with auth tokens as values Field Description name string valueFrom ValueFromSource Values can be read from either secrets or configmaps Selector ( Appears on: ResourceFilter ) Selector represents a conditional operation to select K8s objects. Field Description key string Key name operation string (Optional) Supported operations like ==, != etc. Defaults to ==. Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors for more info.
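A hedged sketch of the SQSEventSource fields above (the secret name, region, queue, and event name are illustrative assumptions):

```yaml
sqs:
  example:                       # assumed event name
    region: us-east-1
    queue: my-queue              # assumed queue name
    waitTimeSeconds: 20
    accessKey:
      name: aws-secret           # assumed secret holding the access key
      key: accesskey
    secretKey:
      name: aws-secret
      key: secretkey
    jsonBody: true
```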
value string Value Sensor Sensor is the definition of a sensor resource Field Description metadata Kubernetes meta/v1.ObjectMeta Refer to the Kubernetes API documentation for the fields of the metadata field. spec SensorSpec dependencies \\[\\]EventDependency Dependencies is a list of the events that this sensor is dependent on. triggers \\[\\]Trigger Triggers is a list of the things that this sensor evokes. These are the outputs from this sensor. template Template (Optional) Template is the pod specification for the sensor errorOnFailedRound bool ErrorOnFailedRound if set to true, marks sensor state as error if the previous trigger round fails. Once sensor state is set to error , no further triggers will be processed. eventBusName string EventBusName references an EventBus name. By default the value is \u201cdefault\u201d replicas int32 Replicas is the sensor deployment replicas revisionHistoryLimit int32 (Optional) RevisionHistoryLimit specifies how many old deployment revisions to retain loggingFields map\\[string\\]string (Optional) LoggingFields adds additional key-value pairs when logging happens status SensorStatus (Optional) SensorSpec ( Appears on: Sensor ) SensorSpec represents desired sensor state Field Description dependencies \\[\\]EventDependency Dependencies is a list of the events that this sensor is dependent on. triggers \\[\\]Trigger Triggers is a list of the things that this sensor evokes. These are the outputs from this sensor. template Template (Optional) Template is the pod specification for the sensor errorOnFailedRound bool ErrorOnFailedRound if set to true, marks sensor state as error if the previous trigger round fails. Once sensor state is set to error , no further triggers will be processed. eventBusName string EventBusName references an EventBus name. By default the value is \u201cdefault\u201d replicas int32 Replicas is the sensor deployment replicas revisionHistoryLimit int32 (Optional) RevisionHistoryLimit specifies how many old deployment revisions to retain loggingFields map\\[string\\]string (Optional) LoggingFields adds additional key-value pairs when logging happens SensorStatus ( Appears on: Sensor ) SensorStatus contains information about the status of a sensor. Field Description Status Status (Members of Status are embedded into this type.) Service ( Appears on: EventSourceSpec ) Service holds the service information the eventsource exposes Field Description ports \\[\\]Kubernetes core/v1.ServicePort The list of ports that are exposed by this ClusterIP service. clusterIP string (Optional) clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field cannot be changed through updates. Valid values are \u201cNone\u201d, empty string (\u201c\u201d), or a valid IP address. \u201cNone\u201d can be specified for headless services when proxying is not required.
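Putting the Sensor and SensorSpec fields above into a minimal sketch (the dependency name, event source name, and event name are illustrative assumptions):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Sensor
metadata:
  name: demo-sensor
spec:
  eventBusName: default
  dependencies:
    - name: demo-dep
      eventSourceName: webhook    # assumed EventSource name
      eventName: example          # assumed event name
  triggers:
    - template:
        name: log-trigger
        log: {}
```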
More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies SlackEventSource ( Appears on: EventSourceSpec ) SlackEventSource refers to an event-source for Slack related events Field Description signingSecret Kubernetes core/v1.SecretKeySelector Slack App signing secret token Kubernetes core/v1.SecretKeySelector Token for URL verification handshake webhook WebhookContext Webhook holds configuration for a REST endpoint metadata map\\[string\\]string (Optional) Metadata holds the user defined metadata which will be passed along with the event payload. filter EventSourceFilter (Optional) Filter SlackSender ( Appears on: SlackTrigger ) Field Description username string (Optional) Username is the Slack application\u2019s username icon string (Optional) Icon is the Slack application\u2019s icon, e.g. :robot_face: or https://example.com/image.png SlackThread ( Appears on: SlackTrigger ) Field Description messageAggregationKey string (Optional) MessageAggregationKey allows aggregating the messages into a thread by some key. broadcastMessageToChannel bool (Optional) BroadcastMessageToChannel also allows broadcasting the message from the thread to the channel SlackTrigger ( Appears on: TriggerTemplate ) SlackTrigger refers to the specification of the slack notification trigger. Field Description parameters \\[\\]TriggerParameter (Optional) Parameters is the list of key-value pairs extracted from the event\u2019s payload that are applied to the trigger resource. slackToken Kubernetes core/v1.SecretKeySelector SlackToken refers to the Kubernetes secret that holds the slack token required to send messages. channel string (Optional) Channel refers to the Slack channel to send the message to. message string (Optional) Message refers to the message to send to the Slack channel. attachments string (Optional) Attachments is a JSON format string that represents an array of Slack attachments according to the attachments API: https://api.slack.com/reference/messaging/attachments . blocks string (Optional) Blocks is a JSON format string that represents an array of Slack blocks according to the blocks API: https://api.slack.com/reference/block-kit/blocks . thread SlackThread (Optional) Thread refers to additional options for sending messages to a Slack thread. sender SlackSender (Optional) Sender refers to additional configuration of the Slack application that sends the message. StandardK8STrigger ( Appears on: TriggerTemplate ) StandardK8STrigger is the standard Kubernetes resource trigger Field Description source ArtifactLocation Source of the K8s resource file(s) operation KubernetesResourceOperation (Optional) Operation refers to the type of operation performed on the k8s resource. Default value is Create. parameters \\[\\]TriggerParameter Parameters is the list of parameters that is applied to the resolved K8s trigger object. patchStrategy k8s.io/apimachinery/pkg/types.PatchType (Optional) PatchStrategy controls the K8s object patching strategy when the trigger operation is specified as patch. Possible values: \u201capplication/json-patch+json\u201d \u201capplication/merge-patch+json\u201d \u201capplication/strategic-merge-patch+json\u201d \u201capplication/apply-patch+yaml\u201d. Defaults to \u201capplication/merge-patch+json\u201d liveObject bool (Optional) LiveObject specifies whether the resource should be directly fetched from K8s instead of being marshaled from the resource artifact.
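A hedged sketch of the SlackTrigger above (the secret name/key and channel are illustrative assumptions):

```yaml
slack:
  slackToken:
    name: slack-secret    # assumed K8s secret holding the token
    key: token
  channel: general        # assumed channel
  message: Hello from Argo Events
```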
If set to true, the resource artifact must contain the information required to uniquely identify the resource in the cluster, that is, you must specify \u201capiVersion\u201d, \u201ckind\u201d as well as \u201cname\u201d and \u201cnamespace\u201d metadata. Only valid for the update operation type Status ( Appears on: EventBusStatus , EventSourceStatus , SensorStatus ) Status is a common structure which can be used for the Status field. Field Description conditions \\[\\]Condition (Optional) Conditions are the latest available observations of a resource\u2019s current state. StatusPolicy ( Appears on: TriggerPolicy ) StatusPolicy refers to the policy used to check the state of the trigger using response status Field Description allow \\[\\]int32 StorageGridEventSource ( Appears on: EventSourceSpec ) StorageGridEventSource refers to an event-source for StorageGrid related events Field Description webhook WebhookContext Webhook holds configuration for a REST endpoint events \\[\\]string filter StorageGridFilter Filter on object key which caused the notification. topicArn string TopicArn bucket string Name of the bucket to register notifications for. region string (Optional) S3 region. Defaults to us-east-1 authToken Kubernetes core/v1.SecretKeySelector Auth token for the storagegrid api apiURL string APIURL is the url of the storagegrid api. metadata map\\[string\\]string (Optional) Metadata holds the user defined metadata which will be passed along with the event payload. StorageGridFilter ( Appears on: StorageGridEventSource ) StorageGridFilter represents filters to apply to bucket notifications for specifying constraints on objects Field Description prefix string suffix string StripeEventSource ( Appears on: EventSourceSpec ) StripeEventSource describes the event source for stripe webhook notifications. More info at https://stripe.com/docs/webhooks Field Description webhook WebhookContext Webhook holds configuration for a REST endpoint createWebhook bool (Optional) CreateWebhook if specified creates a new webhook programmatically. apiKey Kubernetes core/v1.SecretKeySelector (Optional) APIKey refers to a K8s secret that holds the Stripe API key. Used only if CreateWebhook is enabled. eventFilter \\[\\]string (Optional) EventFilter describes the type of events to listen to. If not specified, all types of events will be processed. More info at https://stripe.com/docs/api/events/list metadata map\\[string\\]string (Optional) Metadata holds the user defined metadata which will be passed along with the event payload. TLSConfig ( Appears on: AMQPEventSource , AzureServiceBusEventSource , AzureServiceBusTrigger , BitbucketServerEventSource , EmitterEventSource , HTTPTrigger , KafkaBus , KafkaEventSource , KafkaTrigger , MQTTEventSource , NATSEventsSource , NATSTrigger , NSQEventSource , PulsarEventSource , PulsarTrigger , RedisEventSource , RedisStreamEventSource ) TLSConfig refers to TLS configuration for a client. Field Description caCertSecret Kubernetes core/v1.SecretKeySelector CACertSecret refers to the secret that contains the CA cert clientCertSecret Kubernetes core/v1.SecretKeySelector ClientCertSecret refers to the secret that contains the client cert clientKeySecret Kubernetes core/v1.SecretKeySelector ClientKeySecret refers to the secret that contains the client key insecureSkipVerify bool (Optional) If true, skips creation of TLSConfig with certs and creates an empty TLSConfig.
(Defaults to false) Template ( Appears on: EventSourceSpec , SensorSpec ) Template holds the information of a deployment template Field Description metadata Metadata Metadata sets the pod\u2019s metadata, i.e. annotations and labels serviceAccountName string (Optional) ServiceAccountName is the name of the ServiceAccount to use to run the sensor pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ container Kubernetes core/v1.Container (Optional) Container is the main container image to run in the sensor pod volumes \\[\\]Kubernetes core/v1.Volume (Optional) Volumes is a list of volumes that can be mounted by containers in a workflow. securityContext Kubernetes core/v1.PodSecurityContext (Optional) SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field. nodeSelector map\\[string\\]string (Optional) NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node\u2019s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ tolerations \\[\\]Kubernetes core/v1.Toleration (Optional) If specified, the pod\u2019s tolerations. imagePullSecrets \\[\\]Kubernetes core/v1.LocalObjectReference (Optional) ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod priorityClassName string (Optional) If specified, indicates the EventSource pod\u2019s priority. \u201csystem-node-critical\u201d and \u201csystem-cluster-critical\u201d are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ priority int32 (Optional) The priority value. Various system components use this field to find the priority of the EventSource pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ affinity Kubernetes core/v1.Affinity (Optional) If specified, the pod\u2019s scheduling constraints TimeFilter ( Appears on: EventDependencyFilter ) TimeFilter describes a window in time. It filters out events that occur outside the time limits. In other words, only events that occur after Start and before Stop will pass this filter. Field Description start string Start is the beginning of a time window in UTC. Before this time, events for this dependency are ignored. Format is hh:mm:ss. stop string Stop is the end of a time window in UTC. After or equal to this time, events for this dependency are ignored. Format is hh:mm:ss. If it is smaller than Start, it is treated as the next day of Start (e.g.: 22:00:00-01:00:00 means 22:00:00-25:00:00).
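For example, the TimeFilter above could restrict a dependency to a working-hours window (the dependency names and times are illustrative); events outside 09:00:00-17:00:00 UTC would be ignored:

```yaml
dependencies:
  - name: demo-dep
    eventSourceName: webhook   # assumed EventSource name
    eventName: example         # assumed event name
    filters:
      time:
        start: "09:00:00"
        stop: "17:00:00"
```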
Trigger ( Appears on: SensorSpec , Trigger ) Trigger is an action taken, output produced, an event created, a message sent Field Description template TriggerTemplate Template describes the trigger specification. parameters \\[\\]TriggerParameter Parameters is the list of parameters applied to the trigger template definition policy TriggerPolicy (Optional) Policy to configure backoff and execution criteria for the trigger retryStrategy Backoff (Optional) Retry strategy, defaults to no retry rateLimit RateLimit (Optional) Rate limit, default unit is Second atLeastOnce bool (Optional) AtLeastOnce determines the trigger execution semantics. Defaults to false. Trigger execution will use at-most-once semantics. If set to true, Trigger execution will switch to at-least-once semantics. dlqTrigger Trigger (Optional) If the trigger fails, it will retry up to the configured number of retries. If the maximum retries are reached and the trigger is set to execute atLeastOnce, the dead letter queue (DLQ) trigger will be invoked if specified. Invoking the dead letter queue trigger helps prevent data loss. TriggerParameter ( Appears on: AWSLambdaTrigger , ArgoWorkflowTrigger , AzureEventHubsTrigger , AzureServiceBusTrigger , CustomTrigger , EmailTrigger , HTTPTrigger , KafkaTrigger , NATSTrigger , OpenWhiskTrigger , PulsarTrigger , SlackTrigger , StandardK8STrigger , Trigger ) TriggerParameter indicates a passed parameter to a service template Field Description src TriggerParameterSource Src contains a source reference to the value of the parameter from a dependency dest string Dest is the JSONPath of a resource key. A path is a series of keys separated by a dot. The colon character can be escaped with \u2018.\u2019 The -1 key can be used to append a value to an existing array. See https://github.com/tidwall/sjson#path-syntax for more information about how this is used. operation TriggerParameterOperation Operation is what to do with the existing value at Dest, whether to \u2018prepend\u2019, \u2018overwrite\u2019, or \u2018append\u2019 it. TriggerParameterOperation ( string alias) ( Appears on: TriggerParameter ) TriggerParameterOperation represents how to set a trigger destination resource key TriggerParameterSource ( Appears on: TriggerParameter ) TriggerParameterSource defines the source for a parameter from an event Field Description dependencyName string DependencyName refers to the name of the dependency. The event which is stored for this dependency is used as the payload for the parameterization. Make sure to refer to one of the dependencies you have defined under the Dependencies list. contextKey string ContextKey is the JSONPath of the event\u2019s (JSON decoded) context key. ContextKey is a series of keys separated by a dot. A key may contain wildcard characters \u2018\\*\u2019 and \u2018?\u2019. To access an array value use the index as the key. The dot and wildcard characters can be escaped with a backslash. See https://github.com/tidwall/gjson#path-syntax for more information on how to use this. contextTemplate string ContextTemplate is a go-template for extracting a string from the event\u2019s context. If a ContextTemplate is provided with a ContextKey, the template will be evaluated first and fall back to the ContextKey. The templating follows the standard go-template syntax as well as sprig\u2019s extra functions.
See https://pkg.go.dev/text/template and https://masterminds.github.io/sprig/ dataKey string DataKey is the JSONPath of the event\u2019s (JSON decoded) data key. DataKey is a series of keys separated by a dot. A key may contain wildcard characters \u2018\\*\u2019 and \u2018?\u2019. To access an array value use the index as the key. The dot and wildcard characters can be escaped with a backslash. See https://github.com/tidwall/gjson#path-syntax for more information on how to use this. dataTemplate string DataTemplate is a go-template for extracting a string from the event\u2019s data. If a DataTemplate is provided with a DataKey, the template will be evaluated first and fall back to the DataKey. The templating follows the standard go-template syntax as well as sprig\u2019s extra functions. See https://pkg.go.dev/text/template and https://masterminds.github.io/sprig/ value string Value is the default literal value to use for this parameter source. This is only used if the DataKey is invalid. If the DataKey is invalid and this is not defined, this param source will produce an error. useRawData bool (Optional) UseRawData indicates if the value in an event at data key should be used without converting to string. When true, a number, boolean, json or string parameter may be extracted. When the field is unspecified, or explicitly false, the behavior is to turn the extracted field into a string. (e.g. when set to true, the parameter 123 will resolve to the numerical type, but when false, or not provided, the string \u201c123\u201d will be resolved) TriggerPolicy ( Appears on: Trigger ) TriggerPolicy dictates the policy for the trigger retries Field Description k8s K8SResourcePolicy K8SResourcePolicy refers to the policy used to check the state of K8s based triggers using labels status StatusPolicy Status refers to the policy used to check the state of the trigger using response status TriggerTemplate ( Appears on: Trigger ) TriggerTemplate is the template that describes trigger specification. Field Description name string Name is a unique name of the action to take. conditions string (Optional) Conditions is the conditions to execute the trigger. For example: \u201c(dep01 \\|\\| dep02) && dep04\u201d k8s StandardK8STrigger (Optional) StandardK8STrigger refers to the trigger designed to create or update a generic Kubernetes resource. argoWorkflow ArgoWorkflowTrigger (Optional) ArgoWorkflow refers to the trigger that can perform various operations on an Argo workflow. http HTTPTrigger (Optional) HTTP refers to the trigger designed to dispatch an HTTP request with on-the-fly constructable payload. awsLambda AWSLambdaTrigger (Optional) AWSLambda refers to the trigger designed to invoke an AWS Lambda function with on-the-fly constructable payload. custom CustomTrigger (Optional) CustomTrigger refers to the trigger designed to connect to a gRPC trigger server and execute a custom trigger. kafka KafkaTrigger Kafka refers to the trigger designed to place messages on a Kafka topic. nats NATSTrigger NATS refers to the trigger designed to place messages on a NATS subject. slack SlackTrigger (Optional) Slack refers to the trigger designed to send a Slack notification message. openWhisk OpenWhiskTrigger (Optional) OpenWhisk refers to the trigger designed to invoke an OpenWhisk action. log LogTrigger (Optional) Log refers to the trigger designed to log the event. azureEventHubs AzureEventHubsTrigger (Optional) AzureEventHubs refers to the trigger designed to send an event to an Azure Event Hub.
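As a sketch of the TriggerParameter and TriggerParameterSource fields above (the dependency name, dataKey, fallback value, and destination path are illustrative assumptions):

```yaml
parameters:
  - src:
      dependencyName: demo-dep
      dataKey: body.message        # gjson path into the event data
      value: default-message       # fallback literal if the dataKey is invalid
    dest: spec.arguments.parameters.0.value
    operation: overwrite           # or prepend / append
```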
pulsar PulsarTrigger (Optional) Pulsar refers to the trigger designed to place messages on a Pulsar topic. conditionsReset \\[\\]ConditionsResetCriteria (Optional) Criteria to reset the conditions azureServiceBus AzureServiceBusTrigger (Optional) AzureServiceBus refers to the trigger designed to place messages on Azure Service Bus email EmailTrigger (Optional) Email refers to the trigger designed to send an email notification TriggerType ( string alias) TriggerType is the type of trigger Type ( int64 alias) ( Appears on: Int64OrString ) Type represents the stored type of Int64OrString. URLArtifact ( Appears on: ArtifactLocation ) URLArtifact contains information about an artifact at an http endpoint. Field Description path string Path is the complete URL verifyCert bool VerifyCert decides whether the connection is secure or not ValueFromSource ( Appears on: SecureHeader ) ValueFromSource allows you to reference keys from either a Configmap or Secret Field Description secretKeyRef Kubernetes core/v1.SecretKeySelector configMapKeyRef Kubernetes core/v1.ConfigMapKeySelector WatchPathConfig ( Appears on: FileEventSource , HDFSEventSource , SFTPEventSource ) Field Description directory string Directory to watch for events path string Path is the relative path of the object to watch with respect to the directory pathRegexp string PathRegexp is a regexp of the relative path of the object to watch with respect to the directory WebhookContext ( Appears on: BitbucketEventSource , BitbucketServerEventSource , GerritEventSource , GithubEventSource , GitlabEventSource , SNSEventSource , SlackEventSource , StorageGridEventSource , StripeEventSource , WebhookEventSource ) WebhookContext holds a general purpose REST API context Field Description endpoint string REST API endpoint method string Method is the HTTP request method that indicates the desired action to be performed for a given resource. See RFC7231 Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content port string Port on which the HTTP server is listening for incoming events. url string URL is the url of the server. serverCertSecret Kubernetes core/v1.SecretKeySelector ServerCertSecret refers to the secret that contains the cert. serverKeySecret Kubernetes core/v1.SecretKeySelector ServerKeySecret refers to the secret that contains the private key metadata map\\[string\\]string (Optional) Metadata holds the user defined metadata which will be passed along with the event payload. authSecret Kubernetes core/v1.SecretKeySelector (Optional) AuthSecret holds a secret selector that contains a bearer token for authentication maxPayloadSize int64 (Optional) MaxPayloadSize is the maximum webhook payload size that the server will accept. Requests exceeding that limit will be rejected with a \u201crequest too large\u201d response. Default value: 1048576 (1MB). WebhookEventSource ( Appears on: EventSourceSpec ) WebhookEventSource describes an HTTP based EventSource Field Description WebhookContext WebhookContext (Members of WebhookContext are embedded into this type.) filter EventSourceFilter (Optional) Filter Generated with gen-crd-api-reference-docs .","title":"APIs"},{"location":"CONTRIBUTING/","text":"Contributing \u00b6 How To Provide Feedback \u00b6 Please raise an issue in GitHub . Code of Conduct \u00b6 See CNCF Code of Conduct . Contributor Meetings \u00b6 A weekly opportunity for committers and maintainers of Workflows, Events, and Dataflow to discuss their current work and talk about what\u2019s next. Feel free to join us!
For Contributor Meeting information, minutes and recordings please see here . How To Contribute \u00b6 We're always looking for contributors. Documentation - something missing or unclear? Please submit a pull request! Code contribution - investigate a good first issue , or anything not assigned. Join the #argo-contributors channel on our Slack . Running Locally \u00b6 To run Argo Events locally for development: developer guide . Dependencies \u00b6 Dependencies increase the risk of security issues and have on-going maintenance costs. The dependency must pass these tests: A strong use case. It has an acceptable license (e.g. MIT). It is actively maintained. It has no security issues. For example, should we add fasttemplate ? View the Snyk report : Test Outcome A strong use case. \u274c Fail. We can use text/template . It has an acceptable license (e.g. MIT) \u2705 Pass. MIT license. It is actively maintained. \u274c Fail. Project is inactive. It has no security issues. \u2705 Pass. No known security issues. No, we should not add that dependency. Contributor Workshop \u00b6 We have a 90m video on YouTube showing you how to get hands-on contributing.","title":"Contributing"},{"location":"CONTRIBUTING/#contributing","text":"","title":"Contributing"},{"location":"CONTRIBUTING/#how-to-provide-feedback","text":"Please raise an issue in GitHub .","title":"How To Provide Feedback"},{"location":"CONTRIBUTING/#code-of-conduct","text":"See CNCF Code of Conduct .","title":"Code of Conduct"},{"location":"CONTRIBUTING/#contributor-meetings","text":"A weekly opportunity for committers and maintainers of Workflows, Events, and Dataflow to discuss their current work and talk about what\u2019s next. Feel free to join us! For Contributor Meeting information, minutes and recordings please see here .","title":"Contributor Meetings"},{"location":"CONTRIBUTING/#how-to-contribute","text":"We're always looking for contributors. Documentation - something missing or unclear? Please submit a pull request! Code contribution - investigate a good first issue , or anything not assigned. Join the #argo-contributors channel on our Slack .","title":"How To Contribute"},{"location":"CONTRIBUTING/#running-locally","text":"To run Argo Events locally for development: developer guide .","title":"Running Locally"},{"location":"CONTRIBUTING/#dependencies","text":"Dependencies increase the risk of security issues and have on-going maintenance costs. The dependency must pass these tests: A strong use case. It has an acceptable license (e.g. MIT). It is actively maintained. It has no security issues. For example, should we add fasttemplate ? View the Snyk report : Test Outcome A strong use case. \u274c Fail. We can use text/template . It has an acceptable license (e.g. MIT) \u2705 Pass. MIT license. It is actively maintained. \u274c Fail. Project is inactive. It has no security issues. \u2705 Pass. No known security issues. No, we should not add that dependency.","title":"Dependencies"},{"location":"CONTRIBUTING/#contributor-workshop","text":"We have a 90m video on YouTube showing you how to get hands-on contributing.","title":"Contributor Workshop"},{"location":"FAQ/","text":"FAQs \u00b6 Q. How to get started with Argo Events? A . The recommended way to get started with Argo Events is: Read the basic concepts about EventBus , Sensor and Event Source . Install Argo Events as outlined here . Read the tutorials available here . Q. Can I deploy event-source and sensor in a namespace different than argo-events ? A . Yes.
If you want to deploy the event-source in a different namespace than argo-events , please update the event-source definition with the desired namespace and service account. Make sure to grant the service account the necessary roles . Q. How to debug Argo Events? A . Make sure you have installed everything as instructed here . Make sure you have the EventBus resource created within the namespace. The event-bus, event-source and sensor pods must be running. If you see any issue with the pods, check the logs for sensor-controller, event-source-controller and event-bus-controller. If event-source and sensor pods are running, but you are not receiving any events: Make sure you have configured the event source correctly. Check the event-source pod's container logs. Note: You can set the environment variable LOG_LEVEL:info/debug/error in any of the containers to output debug logs. See here for a debug example. Q. The event-source pod is receiving events but nothing happens. A . Check the sensor resource is deployed and a pod is running for the resource. If the sensor pod is running, check for Started to subscribe events for triggers in the logs. If the sensor has subscribed to the event-bus but is unable to create the trigger resource, please raise an issue on GitHub. The sensor's dependencies have a specific eventSourceName and eventName that should match the values defined in the EventSource resource. See full details here . Q. Helm chart installation does not work. A. The Helm chart for argo events is maintained by the community and can be out of sync with the latest release version. The official installation file is available here . If you notice the Helm chart is outdated, we encourage you to contribute to the argo-helm repository on GitHub. Q. Kustomization file doesn't have an X resource. A. The kustomization.yaml file is maintained by the community. If you notice that it is out of sync with the official installation file, please raise a PR. Q. Can I use the Minio event-source for AWS S3 notifications? A. No. The Minio event-source is exclusively for use with Minio servers. If you want to trigger workloads on an AWS S3 bucket notification, set up the AWS SNS event-source. Q. If I have multiple event dependencies and triggers in a single sensor, can I execute a specific trigger upon a specific event? A. Yes, this functionality is offered by the sensor event resolution circuitry. Please take a look at the Circuit and Switch tutorial. Q. The latest image tag does not point to the latest release tag? A. When it comes to image tags, the golden rule is do not trust the latest tag . Always use the pinned version of the images. We will try to keep the latest tag in sync with the most recently released version. Q. Where can I find the event structure for a particular event-source? A. Please refer to this file to understand the structure of different types of events dispatched by the event-source pod.","title":"FAQs"},{"location":"FAQ/#faqs","text":"Q. How to get started with Argo Events? A . The recommended way to get started with Argo Events is: Read the basic concepts about EventBus , Sensor and Event Source . Install Argo Events as outlined here . Read the tutorials available here . Q. Can I deploy event-source and sensor in a namespace different than argo-events ? A . Yes. If you want to deploy the event-source in a different namespace than argo-events , please update the event-source definition with the desired namespace and service account.
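For the LOG_LEVEL debugging tip above, a hedged sketch of enabling debug logs on an EventSource pod (the webhook configuration is an illustrative placeholder) might be:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: webhook
spec:
  template:
    container:
      env:
        - name: LOG_LEVEL
          value: debug    # or info / error
  webhook:
    example:              # assumed event name
      port: "12000"
      endpoint: /example
      method: POST
```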
Make sure to grant the service account the necessary roles . Q. How to debug Argo Events? A . Make sure you have installed everything as instructed here . Make sure you have the EventBus resource created within the namespace. The event-bus, event-source and sensor pods must be running. If you see any issue with the pods, check the logs for sensor-controller, event-source-controller and event-bus-controller. If event-source and sensor pods are running, but you are not receiving any events: Make sure you have configured the event source correctly. Check the event-source pod's container logs. Note: You can set the environment variable LOG_LEVEL:info/debug/error in any of the containers to output debug logs. See here for a debug example. Q. The event-source pod is receiving events but nothing happens. A . Check the sensor resource is deployed and a pod is running for the resource. If the sensor pod is running, check for Started to subscribe events for triggers in the logs. If the sensor has subscribed to the event-bus but is unable to create the trigger resource, please raise an issue on GitHub. The sensor's dependencies have a specific eventSourceName and eventName that should match the values defined in the EventSource resource. See full details here . Q. Helm chart installation does not work. A. The Helm chart for argo events is maintained by the community and can be out of sync with the latest release version. The official installation file is available here . If you notice the Helm chart is outdated, we encourage you to contribute to the argo-helm repository on GitHub. Q. Kustomization file doesn't have an X resource. A. The kustomization.yaml file is maintained by the community. If you notice that it is out of sync with the official installation file, please raise a PR. Q. Can I use the Minio event-source for AWS S3 notifications? A. No. The Minio event-source is exclusively for use with Minio servers. If you want to trigger workloads on an AWS S3 bucket notification, set up the AWS SNS event-source. Q. If I have multiple event dependencies and triggers in a single sensor, can I execute a specific trigger upon a specific event? A. Yes, this functionality is offered by the sensor event resolution circuitry. Please take a look at the Circuit and Switch tutorial. Q. The latest image tag does not point to the latest release tag? A. When it comes to image tags, the golden rule is do not trust the latest tag . Always use the pinned version of the images. We will try to keep the latest tag in sync with the most recently released version. Q. Where can I find the event structure for a particular event-source? A. Please refer to this file to understand the structure of different types of events dispatched by the event-source pod.","title":"FAQs"},{"location":"developer_guide/","text":"Developer Guide \u00b6 Setup your DEV environment \u00b6 Argo Events is native to Kubernetes so you'll need a running Kubernetes cluster. This guide includes steps for Minikube for local development, but if you have another cluster you can ignore the Minikube specific step 3. Requirements \u00b6 Golang 1.20+ Docker Installation & Setup \u00b6 1. Get the project \u00b6 git clone git@github.com:argoproj/argo-events cd argo-events 2. Start Minikube and point Docker Client to Minikube's Docker Daemon \u00b6 minikube start eval $(minikube docker-env) 3. Build the project \u00b6 make build Changing Types \u00b6 If you're making a change to the pkg/apis package, please ensure you re-run the following command for code regeneration.
make codegen","title":"Developer Guide"},{"location":"developer_guide/#developer-guide","text":"","title":"Developer Guide"},{"location":"developer_guide/#setup-your-dev-environment","text":"Argo Events is native to Kubernetes so you'll need a running Kubernetes cluster. This guide includes steps for Minikube for local development, but if you have another cluster you can ignore the Minikube specific step 3.","title":"Setup your DEV environment"},{"location":"developer_guide/#requirements","text":"Golang 1.20+ Docker","title":"Requirements"},{"location":"developer_guide/#installation-setup","text":"","title":"Installation & Setup"},{"location":"developer_guide/#1-get-the-project","text":"git clone git@github.com:argoproj/argo-events cd argo-events","title":"1. Get the project"},{"location":"developer_guide/#2-start-minikube-and-point-docker-client-to-minikubes-docker-daemon","text":"minikube start eval $(minikube docker-env)","title":"2. Start Minikube and point Docker Client to Minikube's Docker Daemon"},{"location":"developer_guide/#3-build-the-project","text":"make build","title":"3. Build the project"},{"location":"developer_guide/#changing-types","text":"If you're making a change to the pkg/apis package, please ensure you re-run the following command for code regeneration. make codegen","title":"Changing Types"},{"location":"dr_ha_recommendations/","text":"HA/DR Recommendations \u00b6 EventBus \u00b6 A simple EventBus used for non-prod deployment or testing purposes could be: apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : nats : native : auth : token However, this is not good enough to run your production deployment; the following settings are recommended to make it more reliable and achieve high availability. Persistent Volumes \u00b6 Even though the EventBus PODs already have a data sync mechanism between them, persistent volumes are still recommended to be used to avoid any event data loss when the PODs crash. An EventBus with persistent volumes looks like the example below: spec : nats : native : auth : token persistence : storageClassName : standard accessMode : ReadWriteOnce volumeSize : 20Gi Anti-Affinity \u00b6 You can run the EventBus PODs with anti-affinity, to avoid the situation that all PODs are gone when a disaster happens. An EventBus with best effort node anti-affinity: spec : nats : native : auth : token affinity : podAntiAffinity : preferredDuringSchedulingIgnoredDuringExecution : - podAffinityTerm : labelSelector : matchLabels : controller : eventbus-controller eventbus-name : default topologyKey : kubernetes.io/hostname weight : 100 An EventBus with hard requirement node anti-affinity: spec : nats : native : auth : token affinity : podAntiAffinity : requiredDuringSchedulingIgnoredDuringExecution : - labelSelector : matchLabels : controller : eventbus-controller eventbus-name : default topologyKey : kubernetes.io/hostname To do AZ (Availability Zone) anti-affinity, change the value of topologyKey from kubernetes.io/hostname to topology.kubernetes.io/zone . Besides affinity , nodeSelector and tolerations can also be set through spec.nats.native.nodeSelector and spec.nats.native.tolerations . POD Priority \u00b6 Setting POD Priority could reduce the chance of PODs being evicted. Priority could be set through spec.nats.native.priorityClassName or spec.nats.native.priority . PDB \u00b6 The EventBus service is essential to EventSource and Sensor Pods; it is better to have a PodDisruptionBudget to prevent it from Pod Disruptions .
The following PDB object states maxUnavailable is 1, which is suitable for a 3 replica EventBus object. If your EventBus has a name other than default , change it accordingly in the yaml. apiVersion : policy/v1beta1 kind : PodDisruptionBudget metadata : name : eventbus-default-pdb spec : maxUnavailable : 1 selector : matchLabels : controller : eventbus-controller eventbus-name : default EventSources \u00b6 Replicas \u00b6 EventSources can run with HA by setting spec.replicas to a number >1 , see more details here . EventSource POD Node Selection \u00b6 EventSource POD affinity , nodeSelector and tolerations could be set through spec.template.affinity , spec.template.nodeSelector and spec.template.tolerations . EventSource POD Priority \u00b6 Priority could be set through spec.template.priorityClassName or spec.template.priority . Sensors \u00b6 Replicas \u00b6 Sensors can run with HA by setting spec.replicas to a number >1 , see more details here . Sensor POD Node Selection \u00b6 Sensor POD affinity , nodeSelector and tolerations could also be set through spec.template.affinity , spec.template.nodeSelector and spec.template.tolerations . Sensor POD Priority \u00b6 Priority could be set through spec.template.priorityClassName or spec.template.priority .","title":"HA/DR Recommendations"},{"location":"dr_ha_recommendations/#hadr-recommendations","text":"","title":"HA/DR Recommendations"},{"location":"dr_ha_recommendations/#eventbus","text":"A simple EventBus used for non-prod deployment or testing purposes could be: apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : nats : native : auth : token However, this is not good enough to run your production deployment; the following settings are recommended to make it more reliable and achieve high availability.","title":"EventBus"},{"location":"dr_ha_recommendations/#persistent-volumes","text":"Even though the EventBus PODs already have a data sync mechanism between them, persistent volumes are still recommended to be used to avoid any event data loss when the PODs crash. An EventBus with persistent volumes looks like the example below: spec : nats : native : auth : token persistence : storageClassName : standard accessMode : ReadWriteOnce volumeSize : 20Gi","title":"Persistent Volumes"},{"location":"dr_ha_recommendations/#anti-affinity","text":"You can run the EventBus PODs with anti-affinity, to avoid the situation that all PODs are gone when a disaster happens. An EventBus with best effort node anti-affinity: spec : nats : native : auth : token affinity : podAntiAffinity : preferredDuringSchedulingIgnoredDuringExecution : - podAffinityTerm : labelSelector : matchLabels : controller : eventbus-controller eventbus-name : default topologyKey : kubernetes.io/hostname weight : 100 An EventBus with hard requirement node anti-affinity: spec : nats : native : auth : token affinity : podAntiAffinity : requiredDuringSchedulingIgnoredDuringExecution : - labelSelector : matchLabels : controller : eventbus-controller eventbus-name : default topologyKey : kubernetes.io/hostname To do AZ (Availability Zone) anti-affinity, change the value of topologyKey from kubernetes.io/hostname to topology.kubernetes.io/zone . Besides affinity , nodeSelector and tolerations can also be set through spec.nats.native.nodeSelector and spec.nats.native.tolerations .","title":"Anti-Affinity"},{"location":"dr_ha_recommendations/#pod-priority","text":"Setting POD Priority could reduce the chance of PODs being evicted.
Priority could be set through spec.nats.native.priorityClassName or spec.nats.native.priority .","title":"POD Priority"},{"location":"dr_ha_recommendations/#pdb","text":"The EventBus service is essential to EventSource and Sensor Pods; it is better to have a PodDisruptionBudget to prevent it from Pod Disruptions . The following PDB object states maxUnavailable is 1, which is suitable for a 3 replica EventBus object. If your EventBus has a name other than default , change it accordingly in the yaml. apiVersion : policy/v1beta1 kind : PodDisruptionBudget metadata : name : eventbus-default-pdb spec : maxUnavailable : 1 selector : matchLabels : controller : eventbus-controller eventbus-name : default","title":"PDB"},{"location":"dr_ha_recommendations/#eventsources","text":"","title":"EventSources"},{"location":"dr_ha_recommendations/#replicas","text":"EventSources can run with HA by setting spec.replicas to a number >1 , see more details here .","title":"Replicas"},{"location":"dr_ha_recommendations/#eventsource-pod-node-selection","text":"EventSource POD affinity , nodeSelector and tolerations could be set through spec.template.affinity , spec.template.nodeSelector and spec.template.tolerations .","title":"EventSource POD Node Selection"},{"location":"dr_ha_recommendations/#eventsource-pod-priority","text":"Priority could be set through spec.template.priorityClassName or spec.template.priority .","title":"EventSource POD Priority"},{"location":"dr_ha_recommendations/#sensors","text":"","title":"Sensors"},{"location":"dr_ha_recommendations/#replicas_1","text":"Sensors can run with HA by setting spec.replicas to a number >1 , see more details here .","title":"Replicas"},{"location":"dr_ha_recommendations/#sensor-pod-node-selection","text":"Sensor POD affinity , nodeSelector and tolerations could also be set through spec.template.affinity , spec.template.nodeSelector and spec.template.tolerations .","title":"Sensor POD Node Selection"},{"location":"dr_ha_recommendations/#sensor-pod-priority","text":"Priority could be set through spec.template.priorityClassName or spec.template.priority .","title":"Sensor POD Priority"},{"location":"installation/","text":"Installation \u00b6 Requirements \u00b6 Kubernetes cluster >=v1.11 Installed the kubectl command-line tool >v1.11.0 Using kubectl \u00b6 Cluster-wide Installation \u00b6 Create the namespace. kubectl create namespace argo-events Deploy Argo Events SA, ClusterRoles, and Controller for Sensor, EventBus, and EventSource. kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install.yaml # Install with a validating admission controller kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install-validating-webhook.yaml NOTE: * On GKE , you may need to grant your account the ability to create new custom resource definitions and clusterroles kubectl create clusterrolebinding YOURNAME-cluster-admin-binding --clusterrole=cluster-admin --user=YOUREMAIL@gmail.com * On OpenShift : - Make sure to grant `anyuid` scc to the service accounts . oc adm policy add-scc-to-user anyuid system:serviceaccount:argo-events:argo-events-sa system:serviceaccount:argo-events:argo-events-webhook-sa - Add update permissions for the `deployments/finalizers` and `clusterroles/finalizers` of the argo-events-webhook ClusterRole ( this is necessary for the validating admission controller ) - apiGroups: - rbac.authorization.k8s.io
{"location":"installation/","text":"Installation \u00b6 Requirements \u00b6 Kubernetes cluster >=v1.11 The kubectl command-line tool >v1.11.0 installed Using kubectl \u00b6 Cluster-wide Installation \u00b6 Create the namespace. kubectl create namespace argo-events Deploy Argo Events SA, ClusterRoles, and Controller for Sensor, EventBus, and EventSource. kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install.yaml # Install with a validating admission controller kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install-validating-webhook.yaml NOTE: * On GKE , you may need to grant your account the ability to create new custom resource definitions and clusterroles kubectl create clusterrolebinding YOURNAME-cluster-admin-binding --clusterrole=cluster-admin --user=YOUREMAIL@gmail.com * On OpenShift : - Make sure to grant `anyuid` scc to the service accounts. oc adm policy add-scc-to-user anyuid system:serviceaccount:argo-events:argo-events-sa system:serviceaccount:argo-events:argo-events-webhook-sa - Add update permissions for the `deployments/finalizers` and `clusterroles/finalizers` of the argo-events-webhook ClusterRole (this is necessary for the validating admission controller) - apiGroups: - rbac.authorization.k8s.io resources: - clusterroles/finalizers verbs: - update - apiGroups: - apps resources: - deployments/finalizers verbs: - update Deploy the eventbus. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml Namespace Installation \u00b6 Create the namespace. kubectl create namespace argo-events Deploy Argo Events SA, ClusterRoles, and Controller for Sensor, EventBus, and EventSource. kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/namespace-install.yaml NOTE: * On GKE , you may need to grant your account the ability to create new custom resource definitions kubectl create clusterrolebinding YOURNAME-cluster-admin-binding --clusterrole=cluster-admin --user=YOUREMAIL@gmail.com * On OpenShift : - Make sure to grant `anyuid` scc to the service account. oc adm policy add-scc-to-user anyuid system:serviceaccount:argo-events:default - Add update permissions for the `deployments/finalizers` and `clusterroles/finalizers` of the argo-events-webhook ClusterRole (this is necessary for the validating admission controller) - apiGroups: - rbac.authorization.k8s.io resources: - clusterroles/finalizers verbs: - update - apiGroups: - apps resources: - deployments/finalizers verbs: - update Deploy the eventbus. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml Using Kustomize \u00b6 Use either the cluster-install , cluster-install-with-extension , or namespace-install folder as your base for Kustomize. kustomization.yaml : bases: - github.com/argoproj/argo-events/manifests/cluster-install # OR - github.com/argoproj/argo-events/manifests/namespace-install Using Helm Chart \u00b6 Make sure you have the helm client installed. To install helm, follow the link. Add the argoproj repository. helm repo add argo https://argoproj.github.io/argo-helm The helm chart for argo-events is maintained solely by the community and hence the image version for controllers can go out of sync. Update the image version in values.yaml to v1.0.0. Install the argo-events chart. helm install argo-events argo/argo-events -n argo-events --create-namespace Deploy the eventbus. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml Migrate to v1.0.0 \u00b6 If you are looking to migrate Argo Events <0.16.0 to v1.0.0, please read the migration docs .","title":"Installation"},{"location":"installation/#installation","text":"","title":"Installation"},{"location":"installation/#requirements","text":"Kubernetes cluster >=v1.11 The kubectl command-line tool >v1.11.0 installed","title":"Requirements"},{"location":"installation/#using-kubectl","text":"","title":"Using kubectl"},
{"location":"installation/#cluster-wide-installation","text":"Create the namespace. kubectl create namespace argo-events Deploy Argo Events SA, ClusterRoles, and Controller for Sensor, EventBus, and EventSource. kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install.yaml # Install with a validating admission controller kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install-validating-webhook.yaml NOTE: * On GKE , you may need to grant your account the ability to create new custom resource definitions and clusterroles kubectl create clusterrolebinding YOURNAME-cluster-admin-binding --clusterrole=cluster-admin --user=YOUREMAIL@gmail.com * On OpenShift : - Make sure to grant `anyuid` scc to the service accounts. oc adm policy add-scc-to-user anyuid system:serviceaccount:argo-events:argo-events-sa system:serviceaccount:argo-events:argo-events-webhook-sa - Add update permissions for the `deployments/finalizers` and `clusterroles/finalizers` of the argo-events-webhook ClusterRole (this is necessary for the validating admission controller) - apiGroups: - rbac.authorization.k8s.io resources: - clusterroles/finalizers verbs: - update - apiGroups: - apps resources: - deployments/finalizers verbs: - update Deploy the eventbus. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml","title":"Cluster-wide Installation"},{"location":"installation/#namespace-installation","text":"Create the namespace. kubectl create namespace argo-events Deploy Argo Events SA, ClusterRoles, and Controller for Sensor, EventBus, and EventSource. kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/namespace-install.yaml NOTE: * On GKE , you may need to grant your account the ability to create new custom resource definitions kubectl create clusterrolebinding YOURNAME-cluster-admin-binding --clusterrole=cluster-admin --user=YOUREMAIL@gmail.com * On OpenShift : - Make sure to grant `anyuid` scc to the service account. oc adm policy add-scc-to-user anyuid system:serviceaccount:argo-events:default - Add update permissions for the `deployments/finalizers` and `clusterroles/finalizers` of the argo-events-webhook ClusterRole (this is necessary for the validating admission controller) - apiGroups: - rbac.authorization.k8s.io resources: - clusterroles/finalizers verbs: - update - apiGroups: - apps resources: - deployments/finalizers verbs: - update Deploy the eventbus. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml","title":"Namespace Installation"},{"location":"installation/#using-kustomize","text":"Use either the cluster-install , cluster-install-with-extension , or namespace-install folder as your base for Kustomize. kustomization.yaml : bases: - github.com/argoproj/argo-events/manifests/cluster-install # OR - github.com/argoproj/argo-events/manifests/namespace-install","title":"Using Kustomize"},
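A slightly fuller kustomization.yaml sketch for a namespaced install, following the bases style used above (the target namespace is an assumption):
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: argo-events   # assumed target namespace
bases:
  - github.com/argoproj/argo-events/manifests/namespace-install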
{"location":"installation/#using-helm-chart","text":"Make sure you have the helm client installed. To install helm, follow the link. Add the argoproj repository. helm repo add argo https://argoproj.github.io/argo-helm The helm chart for argo-events is maintained solely by the community and hence the image version for controllers can go out of sync. Update the image version in values.yaml to v1.0.0. Install the argo-events chart. helm install argo-events argo/argo-events -n argo-events --create-namespace Deploy the eventbus. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml","title":"Using Helm Chart"},{"location":"installation/#migrate-to-v100","text":"If you are looking to migrate Argo Events <0.16.0 to v1.0.0, please read the migration docs .","title":"Migrate to v1.0.0"},{"location":"managed-namespace/","text":"Managed Namespace \u00b6 You can install argo-events in either a cluster-scoped or a namespace-scoped configuration; accordingly, you need to set up a ClusterRole or a normal Role for the service account argo-events-sa . v1.7+ \u00b6 In a namespace-scoped installation, you must run the controller-manager deployment with --namespaced . If you would like to have the controller watching a separate namespace, add --managed-namespace as well. For example: - args: - --namespaced - --managed-namespace - default Prior to v1.7 \u00b6 There were 3 controller deployments ( eventbus-controller , eventsource-controller and sensor-controller ) in the versions prior to v1.7; to run a namespaced installation, add the --namespaced argument to each of them. The argument --managed-namespace is also supported to watch a different namespace.","title":"Managed Namespace"},{"location":"managed-namespace/#managed-namespace","text":"You can install argo-events in either a cluster-scoped or a namespace-scoped configuration; accordingly, you need to set up a ClusterRole or a normal Role for the service account argo-events-sa .","title":"Managed Namespace"},{"location":"managed-namespace/#v17","text":"In a namespace-scoped installation, you must run the controller-manager deployment with --namespaced . If you would like to have the controller watching a separate namespace, add --managed-namespace as well. For example: - args: - --namespaced - --managed-namespace - default","title":"v1.7+"},{"location":"managed-namespace/#prior-to-v17","text":"There were 3 controller deployments ( eventbus-controller , eventsource-controller and sensor-controller ) in the versions prior to v1.7; to run a namespaced installation, add the --namespaced argument to each of them. The argument --managed-namespace is also supported to watch a different namespace.","title":"Prior to v1.7"},{"location":"metrics/","text":"Prometheus Metrics \u00b6 v1.3 and after User Metrics \u00b6 Each of the generated EventSource, Sensor and EventBus PODs exposes an HTTP endpoint for its metrics, which include things like how many events were generated, how many actions were triggered, and so on. To let your Prometheus server discover those user metrics, add the following to your configuration. - job_name: 'argo-events' kubernetes_sd_configs: - role: pod selectors: - role: pod label: 'controller in (eventsource-controller,sensor-controller,eventbus-controller)' relabel_configs: - source_labels: [__meta_kubernetes_pod_label_eventbus_name, __meta_kubernetes_pod_label_controller] action: replace regex: (.+);eventbus-controller replacement: $1 target_label: 'eventbus_name' - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_pod_label_controller] action: replace regex: (.+);eventbus-controller replacement: $1 target_label: 'namespace' - source_labels: [__address__, __meta_kubernetes_pod_label_controller] action: drop regex: (.+):(\\d222);eventbus-controller Also, please make sure your Prometheus Service Account has the permission to do POD discovery. A sample ClusterRole like the one below needs to be added or merged, then granted to your Service Account.
apiVersion : rbac.authorization.k8s.io/v1 kind : ClusterRole metadata : name : pod-discovery rules : - apiGroups : [ \"\" ] resources : - pods verbs : [ \"get\" , \"list\" , \"watch\" ]
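To grant the ClusterRole above, a ClusterRoleBinding sketch can be used; the Service Account name and namespace here are assumptions, so adjust them to your Prometheus deployment:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: pod-discovery-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: pod-discovery
subjects:
  - kind: ServiceAccount
    name: prometheus        # assumed Prometheus Service Account
    namespace: monitoring   # assumed namespace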
EventSource \u00b6 argo_events_event_service_running_total \u00b6 How many configured events in the EventSource object are actively running. argo_events_events_sent_total \u00b6 How many events have been sent successfully. argo_events_events_sent_failed_total \u00b6 How many events failed to send to EventBus. argo_events_events_processing_failed_total \u00b6 How many events failed to process for any reason; this includes argo_events_events_sent_failed_total . argo_events_event_processing_duration_milliseconds \u00b6 Event processing duration (from receiving the event to sending it to the EventBus) in milliseconds. Sensor \u00b6 argo_events_action_triggered_total \u00b6 How many actions have been triggered successfully. argo_events_action_failed_total \u00b6 How many actions failed. argo_events_action_retries_failed_total \u00b6 How many actions failed after the retries have been exhausted. This is also incremented if there is no retryStrategy specified. argo_events_action_duration_milliseconds \u00b6 Action triggering duration. EventBus \u00b6 For the native NATS EventBus, check this link for the metrics explanation. Controller Metrics \u00b6 If you are interested in Argo Events controller metrics, add the following to your Prometheus configuration. - job_name: 'argo-events-controllers' kubernetes_sd_configs: - role: pod selectors: - role: pod label: 'app in (eventsource-controller,sensor-controller,eventbus-controller)' relabel_configs: - source_labels: [__address__, __meta_kubernetes_pod_label_app] action: replace regex: (.+);(eventsource-controller|sensor-controller|eventbus-controller) replacement: $1:7777 target_label: '__address__' - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_pod_label_app] action: replace regex: (.+);(eventsource-controller|sensor-controller|eventbus-controller) replacement: $1 target_label: 'namespace' Golden Signals \u00b6 The following metrics are considered the Golden Signals for monitoring your applications running with Argo Events. Latency argo_events_event_processing_duration_milliseconds argo_events_action_duration_milliseconds Traffic argo_events_events_sent_total argo_events_action_triggered_total Errors argo_events_events_processing_failed_total argo_events_events_sent_failed_total argo_events_action_failed_total argo_events_action_retries_failed_total Saturation argo_events_event_service_running_total . Other Kubernetes metrics such as CPU or memory.","title":"Prometheus Metrics"},{"location":"metrics/#prometheus-metrics","text":"v1.3 and after","title":"Prometheus Metrics"},{"location":"metrics/#user-metrics","text":"Each of the generated EventSource, Sensor and EventBus PODs exposes an HTTP endpoint for its metrics, which include things like how many events were generated, how many actions were triggered, and so on. To let your Prometheus server discover those user metrics, add the following to your configuration. - job_name: 'argo-events' kubernetes_sd_configs: - role: pod selectors: - role: pod label: 'controller in (eventsource-controller,sensor-controller,eventbus-controller)' relabel_configs: - source_labels: [__meta_kubernetes_pod_label_eventbus_name, __meta_kubernetes_pod_label_controller] action: replace regex: (.+);eventbus-controller replacement: $1 target_label: 'eventbus_name' - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_pod_label_controller] action: replace regex: (.+);eventbus-controller replacement: $1 target_label: 'namespace' - source_labels: [__address__, __meta_kubernetes_pod_label_controller] action: drop regex: (.+):(\\d222);eventbus-controller Also, please make sure your Prometheus Service Account has the permission to do POD discovery. A sample ClusterRole like the one below needs to be added or merged, then granted to your Service Account. apiVersion : rbac.authorization.k8s.io/v1 kind : ClusterRole metadata : name : pod-discovery rules : - apiGroups : [ \"\" ] resources : - pods verbs : [ \"get\" , \"list\" , \"watch\" ]","title":"User Metrics"},{"location":"metrics/#eventsource","text":"","title":"EventSource"},{"location":"metrics/#argo_events_event_service_running_total","text":"How many configured events in the EventSource object are actively running.","title":"argo_events_event_service_running_total"},{"location":"metrics/#argo_events_events_sent_total","text":"How many events have been sent successfully.","title":"argo_events_events_sent_total"},{"location":"metrics/#argo_events_events_sent_failed_total","text":"How many events failed to send to EventBus.","title":"argo_events_events_sent_failed_total"},{"location":"metrics/#argo_events_events_processing_failed_total","text":"How many events failed to process for any reason; this includes argo_events_events_sent_failed_total .","title":"argo_events_events_processing_failed_total"},{"location":"metrics/#argo_events_event_processing_duration_milliseconds","text":"Event processing duration (from receiving the event to sending it to the EventBus) in milliseconds.","title":"argo_events_event_processing_duration_milliseconds"},{"location":"metrics/#sensor","text":"","title":"Sensor"},{"location":"metrics/#argo_events_action_triggered_total","text":"How many actions have been triggered successfully.","title":"argo_events_action_triggered_total"},{"location":"metrics/#argo_events_action_failed_total","text":"How many actions failed.","title":"argo_events_action_failed_total"},{"location":"metrics/#argo_events_action_retries_failed_total","text":"How many actions failed after the retries have been exhausted. This is also incremented if there is no retryStrategy specified.","title":"argo_events_action_retries_failed_total"},{"location":"metrics/#argo_events_action_duration_milliseconds","text":"Action triggering duration.","title":"argo_events_action_duration_milliseconds"},{"location":"metrics/#eventbus","text":"For the native NATS EventBus, check this link for the metrics explanation.","title":"EventBus"},{"location":"metrics/#controller-metrics","text":"If you are interested in Argo Events controller metrics, add the following to your Prometheus configuration.
- job_name: 'argo-events-controllers' kubernetes_sd_configs: - role: pod selectors: - role: pod label: 'app in (eventsource-controller,sensor-controller,eventbus-controller)' relabel_configs: - source_labels: [__address__, __meta_kubernetes_pod_label_app] action: replace regex: (.+);(eventsource-controller|sensor-controller|eventbus-controller) replacement: $1:7777 target_label: '__address__' - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_pod_label_app] action: replace regex: (.+);(eventsource-controller|sensor-controller|eventbus-controller) replacement: $1 target_label: 'namespace'","title":"Controller Metrics"},{"location":"metrics/#golden-signals","text":"The following metrics are considered the Golden Signals for monitoring your applications running with Argo Events. Latency argo_events_event_processing_duration_milliseconds argo_events_action_duration_milliseconds Traffic argo_events_events_sent_total argo_events_action_triggered_total Errors argo_events_events_processing_failed_total argo_events_events_sent_failed_total argo_events_action_failed_total argo_events_action_retries_failed_total Saturation argo_events_event_service_running_total . Other Kubernetes metrics such as CPU or memory. An example alert sketch based on these metrics follows below.","title":"Golden Signals"},
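As an illustration only, a PrometheusRule sketch that alerts on the Errors signal above; it assumes the Prometheus Operator CRDs are installed, and the threshold and labels are arbitrary:
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: argo-events-golden-signals
spec:
  groups:
    - name: argo-events
      rules:
        - alert: ArgoEventsSendFailures
          expr: rate(argo_events_events_sent_failed_total[5m]) > 0   # arbitrary threshold
          for: 10m
          labels:
            severity: warning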
{"location":"quick_start/","text":"Getting Started \u00b6 We are going to set up a sensor and event-source for webhook. The goal is to trigger an Argo workflow upon an HTTP POST request. Note: You will need to have Argo Workflows installed to make this work. The Argo Workflow controller will need to be configured to listen for Workflow objects created in the argo-events namespace. (See this link.) The Workflow Controller will need to be installed either in a cluster-scope configuration (i.e. no \"--namespaced\" argument) so that it has visibility to all namespaces, or with \"--managed-namespace\" set to define \"argo-events\" as a namespace it has visibility to. To deploy Argo Workflows with a cluster-scope configuration you can use this installation yaml file, setting ARGO_WORKFLOWS_VERSION with your desired version. A list of versions can be found by viewing these project tags in the Argo Workflow GitHub repository. export ARGO_WORKFLOWS_VERSION=3.5.4 kubectl create namespace argo kubectl apply -n argo -f https://github.com/argoproj/argo-workflows/releases/download/v$ARGO_WORKFLOWS_VERSION/install.yaml Install Argo Events kubectl create namespace argo-events kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install.yaml # Install with a validating admission controller kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install-validating-webhook.yaml Make sure to have the eventbus pods running in the namespace. Run the following command to create the eventbus. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml Set up the event-source for webhook as follows. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml The above event-source contains a single event configuration that runs an HTTP server on port 12000 with endpoint example . After running the above command, the event-source controller will create a pod and service. Create a service account with RBAC settings to allow the sensor to trigger workflows, and allow workflows to function. # sensor rbac kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/rbac/sensor-rbac.yaml # workflow rbac kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/rbac/workflow-rbac.yaml Create the webhook sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/webhook.yaml Once the sensor object is created, the sensor controller will create a corresponding pod and service. Expose the event-source pod via Ingress, OpenShift Route, or port forwarding to consume requests over HTTP. kubectl -n argo-events port-forward $(kubectl -n argo-events get pod -l eventsource-name=webhook -o name) 12000:12000 & Use either curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Verify that an Argo workflow was triggered. kubectl -n argo-events get workflows | grep \"webhook\"","title":"Getting Started"},
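For reference, a sketch of roughly what that webhook event-source manifest configures (an HTTP server on port 12000 with the endpoint /example); treat it as illustrative rather than a copy of the referenced file:
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: webhook
spec:
  webhook:
    example:
      port: "12000"       # the HTTP server port described above
      endpoint: /example  # the endpoint the curl request targets
      method: POST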
{"location":"quick_start/#getting-started","text":"We are going to set up a sensor and event-source for webhook. The goal is to trigger an Argo workflow upon an HTTP POST request. Note: You will need to have Argo Workflows installed to make this work. The Argo Workflow controller will need to be configured to listen for Workflow objects created in the argo-events namespace. (See this link.) The Workflow Controller will need to be installed either in a cluster-scope configuration (i.e. no \"--namespaced\" argument) so that it has visibility to all namespaces, or with \"--managed-namespace\" set to define \"argo-events\" as a namespace it has visibility to. To deploy Argo Workflows with a cluster-scope configuration you can use this installation yaml file, setting ARGO_WORKFLOWS_VERSION with your desired version. A list of versions can be found by viewing these project tags in the Argo Workflow GitHub repository. export ARGO_WORKFLOWS_VERSION=3.5.4 kubectl create namespace argo kubectl apply -n argo -f https://github.com/argoproj/argo-workflows/releases/download/v$ARGO_WORKFLOWS_VERSION/install.yaml Install Argo Events kubectl create namespace argo-events kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install.yaml # Install with a validating admission controller kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install-validating-webhook.yaml Make sure to have the eventbus pods running in the namespace. Run the following command to create the eventbus. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml Set up the event-source for webhook as follows. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml The above event-source contains a single event configuration that runs an HTTP server on port 12000 with endpoint example . After running the above command, the event-source controller will create a pod and service. Create a service account with RBAC settings to allow the sensor to trigger workflows, and allow workflows to function. # sensor rbac kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/rbac/sensor-rbac.yaml # workflow rbac kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/rbac/workflow-rbac.yaml Create the webhook sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/webhook.yaml Once the sensor object is created, the sensor controller will create a corresponding pod and service. Expose the event-source pod via Ingress, OpenShift Route, or port forwarding to consume requests over HTTP. kubectl -n argo-events port-forward $(kubectl -n argo-events get pod -l eventsource-name=webhook -o name) 12000:12000 & Use either curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Verify that an Argo workflow was triggered. kubectl -n argo-events get workflows | grep \"webhook\"","title":"Getting Started"},{"location":"releases/","text":"Releases \u00b6 Latest releases Supported Versions \u00b6 Versions are expressed as x.y.z, where x is the major version, y is the minor version, and z is the patch version, following Semantic Versioning terminology. We maintain release branches for the most recent two minor releases. Fixes may be backported to release branches, depending on severity, risk, and feasibility. If a release contains breaking changes, or CVE fixes, this will be documented in the release notes. Supported Version Skew \u00b6 Image versions of eventsource , sensor , eventbus-controller , eventsource-controller , sensor-controller and events-webhook should be the same. Release Cycle \u00b6 For unstable , we build and tag the latest images for every commit to master. New minor versions are released roughly every 2 months. Release candidates for each release are typically available for 2 weeks before the release becomes generally available. Otherwise, we typically patch the release as needed.","title":"Releases"},{"location":"releases/#releases","text":"Latest releases","title":"Releases"},{"location":"releases/#supported-versions","text":"Versions are expressed as x.y.z, where x is the major version, y is the minor version, and z is the patch version, following Semantic Versioning terminology. We maintain release branches for the most recent two minor releases. Fixes may be backported to release branches, depending on severity, risk, and feasibility. If a release contains breaking changes, or CVE fixes, this will be documented in the release notes.","title":"Supported Versions"},{"location":"releases/#supported-version-skew","text":"Image versions of eventsource , sensor , eventbus-controller , eventsource-controller , sensor-controller and events-webhook should be the same.","title":"Supported Version Skew"},{"location":"releases/#release-cycle","text":"For unstable , we build and tag the latest images for every commit to master. New minor versions are released roughly every 2 months. Release candidates for each release are typically available for 2 weeks before the release becomes generally available.
Otherwise, we typically patch the release as needed.","title":"Release Cycle"},{"location":"security/","text":"Security \u00b6 Please see SECURITY.md","title":"Security"},{"location":"security/#security","text":"Please see SECURITY.md","title":"Security"},{"location":"service-accounts/","text":"Service Accounts \u00b6 Service Account for EventSources \u00b6 A Service Account can be specified in the EventSource object with spec.template.serviceAccountName ; however, it is not needed for any EventSource type except resource . For a resource EventSource, you need to specify a Service Account and give it list and watch permissions for the resource being watched. For example, if you want to watch actions on Deployment objects, you need to: Create a Service Account. kubectl -n your-namespace create sa my-sa Grant RBAC privileges to it. kubectl -n your-namespace create role deployments-watcher --verb=list,watch --resource=deployments.apps kubectl -n your-namespace create rolebinding deployments-watcher-role-binding --role=deployments-watcher --serviceaccount=your-namespace:my-sa or (if you want to watch cluster scope) kubectl create clusterrole deployments-watcher --verb=list,watch --resource=deployments.apps kubectl create clusterrolebinding deployments-watcher-clusterrole-binding --clusterrole=deployments-watcher --serviceaccount=your-namespace:my-sa Service Account for Sensors \u00b6 A Service Account can also be specified in a Sensor object via spec.template.serviceAccountName ; this is only needed when a k8s trigger or argoWorkflow trigger is defined in the Sensor object. The sensor examples provided by us use the operate-workflow-sa service account to execute the triggers, but it has more permissions than needed, and you may want to limit those privileges based on your use-case. It is always good practice to create a service account with the minimum privileges required. Argo Workflow Trigger \u00b6 To submit a workflow through the argoWorkflow trigger, make sure to grant the Service Account create and list access to workflows.argoproj.io . To resubmit , retry , resume or suspend a workflow through the argoWorkflow trigger, the service account needs update and get access to workflows.argoproj.io . K8s Resource Trigger \u00b6 To trigger a K8s resource, including workflows.argoproj.io , through the k8s trigger, make sure to grant the create permission for that resource. AWS Lambda, HTTP, Slack, NATS, Kafka, and OpenWhisk Triggers \u00b6 For these triggers, you don't need to specify a Service Account for the Sensor. Service Account for Triggered Workflows (or other K8s resources) \u00b6 When the Sensor is used to trigger a Workflow, you might need to configure the Service Account used in the Workflow spec ( NOT spec.template.serviceAccountName ) following the Argo Workflow instructions . If it is used to trigger other K8s resources (e.g. a Deployment), make sure to follow the least-privilege principle.","title":"Service Accounts"},{"location":"service-accounts/#service-accounts","text":"","title":"Service Accounts"},
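As mentioned above for the argoWorkflow trigger, a minimal Role sketch granting a trigger Service Account create and list access to workflows (the Role name is an assumption):
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: operate-workflow-role   # assumed name; bind it to your sensor's trigger Service Account
rules:
  - apiGroups:
      - argoproj.io
    resources:
      - workflows
    verbs:
      - create
      - list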
{"location":"service-accounts/#service-account-for-eventsources","text":"A Service Account can be specified in the EventSource object with spec.template.serviceAccountName ; however, it is not needed for any EventSource type except resource . For a resource EventSource, you need to specify a Service Account and give it list and watch permissions for the resource being watched. For example, if you want to watch actions on Deployment objects, you need to: Create a Service Account. kubectl -n your-namespace create sa my-sa Grant RBAC privileges to it. kubectl -n your-namespace create role deployments-watcher --verb=list,watch --resource=deployments.apps kubectl -n your-namespace create rolebinding deployments-watcher-role-binding --role=deployments-watcher --serviceaccount=your-namespace:my-sa or (if you want to watch cluster scope) kubectl create clusterrole deployments-watcher --verb=list,watch --resource=deployments.apps kubectl create clusterrolebinding deployments-watcher-clusterrole-binding --clusterrole=deployments-watcher --serviceaccount=your-namespace:my-sa","title":"Service Account for EventSources"},{"location":"service-accounts/#service-account-for-sensors","text":"A Service Account can also be specified in a Sensor object via spec.template.serviceAccountName ; this is only needed when a k8s trigger or argoWorkflow trigger is defined in the Sensor object. The sensor examples provided by us use the operate-workflow-sa service account to execute the triggers, but it has more permissions than needed, and you may want to limit those privileges based on your use-case. It is always good practice to create a service account with the minimum privileges required.","title":"Service Account for Sensors"},{"location":"service-accounts/#argo-workflow-trigger","text":"To submit a workflow through the argoWorkflow trigger, make sure to grant the Service Account create and list access to workflows.argoproj.io . To resubmit , retry , resume or suspend a workflow through the argoWorkflow trigger, the service account needs update and get access to workflows.argoproj.io .","title":"Argo Workflow Trigger"},{"location":"service-accounts/#k8s-resource-trigger","text":"To trigger a K8s resource, including workflows.argoproj.io , through the k8s trigger, make sure to grant the create permission for that resource.","title":"K8s Resource Trigger"},{"location":"service-accounts/#aws-lambda-http-slack-nats-kafka-and-openwhisk-triggers","text":"For these triggers, you don't need to specify a Service Account for the Sensor.","title":"AWS Lambda, HTTP, Slack, NATS, Kafka, and OpenWhisk Triggers"},{"location":"service-accounts/#service-account-for-triggered-workflows-or-other-k8s-resources","text":"When the Sensor is used to trigger a Workflow, you might need to configure the Service Account used in the Workflow spec ( NOT spec.template.serviceAccountName ) following the Argo Workflow instructions . If it is used to trigger other K8s resources (e.g. a Deployment), make sure to follow the least-privilege principle.","title":"Service Account for Triggered Workflows (or other K8s resources)"},{"location":"validating-admission-webhook/","text":"Validating Admission Webhook \u00b6 v1.3 and after Overview \u00b6 Starting from v1.3, a Validating Admission Webhook has been introduced to the project. To install the validating webhook, use the following command (change the version): kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/ { version } /manifests/install-validating-webhook.yaml Benefits \u00b6 Using the validating webhook has the following benefits: It reports the error at the time the faulty spec is applied, so you don't need to check the CRD object's status field for condition errors later on. e.g.
Creating an exotic NATS EventBus without ClusterID specified: cat <<EOF | kubectl create -f - apiVersion: argoproj.io/v1alpha1 > kind: EventBus > metadata: > name: default > spec: > nats: > exotic: {} > EOF Error from server ( BadRequest ) : error when creating \"STDIN\" : admission webhook \"webhook.argo-events.argoproj.io\" denied the request: \"spec.nats.exotic.clusterID\" is missing Spec updating behavior can be validated. Updating existing specs requires more validation: besides checking whether the new spec is valid, we also need to check whether any immutable fields are being updated. This cannot be done in controller reconciliation, but we can do it by using the validating webhook. For example, updating the Auth Strategy for a native NATS EventBus is prohibited; a denied response like the following will be returned.
Error from server ( BadRequest ) : error when applying patch: { \"metadata\" : { \"annotations\" : { \"kubectl.kubernetes.io/last-applied-configuration\" : \"{\\\"apiVersion\\\":\\\"argoproj.io/v1alpha1\\\",\\\"kind\\\":\\\"EventBus\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"default\\\",\\\"namespace\\\":\\\"argo-events\\\"},\\\"spec\\\":{\\\"nats\\\":{\\\"native\\\":{\\\"replicas\\\":3}}}}\\n\" }} , \"spec\" : { \"nats\" : { \"native\" : { \"auth\" :null, \"maxAge\" :null, \"securityContext\" :null }}}} to: Resource: \"argoproj.io/v1alpha1, Resource=eventbus\" , GroupVersionKind: \"argoproj.io/v1alpha1, Kind=EventBus\" Name: \"default\" , Namespace: \"argo-events\" for : \"test-eventbus.yaml\" : admission webhook \"webhook.argo-events.argoproj.io\" denied the request: \"spec.nats.native.auth\" is immutable, can not be updated","title":"Validating Admission Webhook"},{"location":"validating-admission-webhook/#validating-admission-webhook","text":"v1.3 and after","title":"Validating Admission Webhook"},{"location":"validating-admission-webhook/#overview","text":"Starting from v1.3, a Validating Admission Webhook has been introduced to the project. To install the validating webhook, use the following command (change the version): kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/ { version } /manifests/install-validating-webhook.yaml","title":"Overview"},{"location":"validating-admission-webhook/#benefits","text":"Using the validating webhook has the following benefits: It reports the error at the time the faulty spec is applied, so you don't need to check the CRD object's status field for condition errors later on. e.g. Creating an exotic NATS EventBus without ClusterID specified: cat <<EOF | kubectl create -f - apiVersion: argoproj.io/v1alpha1 > kind: EventBus > metadata: > name: default > spec: > nats: > exotic: {} > EOF Error from server ( BadRequest ) : error when creating \"STDIN\" : admission webhook \"webhook.argo-events.argoproj.io\" denied the request: \"spec.nats.exotic.clusterID\" is missing Spec updating behavior can be validated. Updating existing specs requires more validation: besides checking whether the new spec is valid, we also need to check whether any immutable fields are being updated. This cannot be done in controller reconciliation, but we can do it by using the validating webhook. For example, updating the Auth Strategy for a native NATS EventBus is prohibited; a denied response like the following will be returned. Error from server ( BadRequest ) : error when applying patch: { \"metadata\" : { \"annotations\" : { \"kubectl.kubernetes.io/last-applied-configuration\" : \"{\\\"apiVersion\\\":\\\"argoproj.io/v1alpha1\\\",\\\"kind\\\":\\\"EventBus\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"default\\\",\\\"namespace\\\":\\\"argo-events\\\"},\\\"spec\\\":{\\\"nats\\\":{\\\"native\\\":{\\\"replicas\\\":3}}}}\\n\" }} , \"spec\" : { \"nats\" : { \"native\" : { \"auth\" :null, \"maxAge\" :null, \"securityContext\" :null }}}} to: Resource: \"argoproj.io/v1alpha1, Resource=eventbus\" , GroupVersionKind: \"argoproj.io/v1alpha1, Kind=EventBus\" Name: \"default\" , Namespace: \"argo-events\" for : \"test-eventbus.yaml\" : admission webhook \"webhook.argo-events.argoproj.io\" denied the request: \"spec.nats.native.auth\" is immutable, can not be updated","title":"Benefits"},
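For completeness, a sketch of an exotic NATS EventBus spec that would pass the first validation above, with the previously missing clusterID supplied (the url and clusterID values are placeholders):
apiVersion: argoproj.io/v1alpha1
kind: EventBus
metadata:
  name: default
spec:
  nats:
    exotic:
      url: nats://xxxxx:xxx    # placeholder
      clusterID: cluster-id    # the field the webhook reported as missing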
{"location":"concepts/architecture/","text":"Architecture \u00b6 The main components of Argo Events are: Event Source Sensor Eventbus Trigger","title":"Architecture"},{"location":"concepts/architecture/#architecture","text":"The main components of Argo Events are: Event Source Sensor Eventbus Trigger","title":"Architecture"},{"location":"concepts/event_source/","text":"Event Source \u00b6 An EventSource defines the configurations required to consume events from external sources like AWS SNS, SQS, GCP PubSub, Webhooks, etc. It further transforms the events into cloudevents and dispatches them to the eventbus. Available event-sources: AMQP AWS SNS AWS SQS Azure Events Hub Azure Queue Storage Bitbucket Bitbucket Server Calendar Emitter File Based Events GCP PubSub Generic EventSource GitHub GitLab HDFS K8s Resources Kafka Minio NATS NetApp StorageGrid MQTT NSQ Pulsar Redis Slack Stripe Webhooks Specification \u00b6 The complete specification is available here . Examples \u00b6 Examples are located under examples/event-sources .","title":"Event Source"},{"location":"concepts/event_source/#event-source","text":"An EventSource defines the configurations required to consume events from external sources like AWS SNS, SQS, GCP PubSub, Webhooks, etc. It further transforms the events into cloudevents and dispatches them to the eventbus. Available event-sources: AMQP AWS SNS AWS SQS Azure Events Hub Azure Queue Storage Bitbucket Bitbucket Server Calendar Emitter File Based Events GCP PubSub Generic EventSource GitHub GitLab HDFS K8s Resources Kafka Minio NATS NetApp StorageGrid MQTT NSQ Pulsar Redis Slack Stripe Webhooks","title":"Event Source"},{"location":"concepts/event_source/#specification","text":"The complete specification is available here .","title":"Specification"},{"location":"concepts/event_source/#examples","text":"Examples are located under examples/event-sources .","title":"Examples"},{"location":"concepts/eventbus/","text":"EventBus \u00b6 The EventBus acts as the transport layer of Argo-Events by connecting the EventSources and Sensors. EventSources publish the events while the Sensors subscribe to the events to execute triggers. There are three implementations of the EventBus: NATS (deprecated), Jetstream , and Kafka .","title":"EventBus"},{"location":"concepts/eventbus/#eventbus","text":"The EventBus acts as the transport layer of Argo-Events by connecting the EventSources and Sensors. EventSources publish the events while the Sensors subscribe to the events to execute triggers. There are three implementations of the EventBus: NATS (deprecated), Jetstream , and Kafka .","title":"EventBus"},{"location":"concepts/sensor/","text":"Sensor \u00b6 Sensor defines a set of event dependencies (inputs) and triggers (outputs). It listens to events on the eventbus and acts as an event dependency manager to resolve and execute the triggers. Event dependency \u00b6 A dependency is an event the sensor is waiting for. Specification \u00b6 The complete specification is available here . Examples \u00b6 Examples are located under examples/sensors .","title":"Sensor"},{"location":"concepts/sensor/#sensor","text":"Sensor defines a set of event dependencies (inputs) and triggers (outputs). It listens to events on the eventbus and acts as an event dependency manager to resolve and execute the triggers.","title":"Sensor"},{"location":"concepts/sensor/#event-dependency","text":"A dependency is an event the sensor is waiting for.","title":"Event dependency"},{"location":"concepts/sensor/#specification","text":"The complete specification is available here .","title":"Specification"},{"location":"concepts/sensor/#examples","text":"Examples are located under examples/sensors .","title":"Examples"},{"location":"concepts/trigger/","text":"Trigger \u00b6 A Trigger is the resource/workload executed by the sensor once the event dependencies are resolved. Trigger Types \u00b6 AWS Lambda Apache OpenWhisk Argo Rollouts Argo Workflows Custom - Build Your Own HTTP Requests - Serverless Workloads (OpenFaaS, Kubeless, KNative etc.) Kafka Messages NATS Messages Slack Notifications Azure Event Hubs Messages Create any Kubernetes Objects Log (for debugging event bus messages)","title":"Trigger"},{"location":"concepts/trigger/#trigger","text":"A Trigger is the resource/workload executed by the sensor once the event dependencies are resolved.","title":"Trigger"},{"location":"concepts/trigger/#trigger-types","text":"AWS Lambda Apache OpenWhisk Argo Rollouts Argo Workflows Custom - Build Your Own HTTP Requests - Serverless Workloads (OpenFaaS, Kubeless, KNative etc.) Kafka Messages NATS Messages Slack Notifications Azure Event Hubs Messages Create any Kubernetes Objects Log (for debugging event bus messages)","title":"Trigger Types"},
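To tie these concepts together, a minimal Sensor sketch using the log trigger listed above; the dependency names assume the webhook event-source from the getting-started example:
apiVersion: argoproj.io/v1alpha1
kind: Sensor
metadata:
  name: log-sensor
spec:
  dependencies:
    - name: example-dep
      eventSourceName: webhook   # assumed EventSource name
      eventName: example         # assumed event name
  triggers:
    - template:
        name: log-trigger
        log: {}                  # prints matched events, useful for debugging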
{"location":"eventbus/antiaffinity/","text":"Anti-affinity \u00b6 Kubernetes offers a concept of anti-affinity , meaning that pods are scheduled on separate nodes. The anti-affinity can either be \"best effort\" or a hard requirement. Best effort and hard requirement node anti-affinity configs look like the examples below; if you want to do AZ (Availability Zone) anti-affinity, change the value of topologyKey from kubernetes.io/hostname to topology.kubernetes.io/zone . # Best effort affinity : podAntiAffinity : preferredDuringSchedulingIgnoredDuringExecution : - podAffinityTerm : labelSelector : matchLabels : controller : eventbus-controller eventbus-name : default topologyKey : kubernetes.io/hostname weight : 100 # Hard requirement affinity : podAntiAffinity : requiredDuringSchedulingIgnoredDuringExecution : - labelSelector : matchLabels : controller : eventbus-controller eventbus-name : default topologyKey : kubernetes.io/hostname","title":"Antiaffinity"},{"location":"eventbus/antiaffinity/#anti-affinity","text":"Kubernetes offers a concept of anti-affinity , meaning that pods are scheduled on separate nodes. The anti-affinity can either be \"best effort\" or a hard requirement. Best effort and hard requirement node anti-affinity configs look like the examples below; if you want to do AZ (Availability Zone) anti-affinity, change the value of topologyKey from kubernetes.io/hostname to topology.kubernetes.io/zone . # Best effort affinity : podAntiAffinity : preferredDuringSchedulingIgnoredDuringExecution : - podAffinityTerm : labelSelector : matchLabels : controller : eventbus-controller eventbus-name : default topologyKey : kubernetes.io/hostname weight : 100 # Hard requirement affinity : podAntiAffinity : requiredDuringSchedulingIgnoredDuringExecution : - labelSelector : matchLabels : controller : eventbus-controller eventbus-name : default topologyKey : kubernetes.io/hostname","title":"Anti-affinity"},{"location":"eventbus/eventbus/","text":"EventBus \u00b6 v0.17.0 and after EventBus is a Kubernetes Custom Resource which is used for event transmission from EventSources to Sensors. Currently, EventBus is backed by NATS (both their NATS Streaming service and their newer Jetstream service) and by Kafka. In the future, this can be expanded to support other technologies as well. EventBus is namespaced; an EventBus object is required in a namespace to make EventSource and Sensor work. The common practice is to create an EventBus named default in the namespace. If you want to use a different name, or you want to have multiple EventBus objects in one namespace, you need to specify eventBusName in the spec of EventSource and Sensor correspondingly, so that they can find the right one. See EventSource spec and Sensor spec .","title":"EventBus"},{"location":"eventbus/eventbus/#eventbus","text":"v0.17.0 and after EventBus is a Kubernetes Custom Resource which is used for event transmission from EventSources to Sensors. Currently, EventBus is backed by NATS (both their NATS Streaming service and their newer Jetstream service) and by Kafka. In the future, this can be expanded to support other technologies as well. EventBus is namespaced; an EventBus object is required in a namespace to make EventSource and Sensor work. The common practice is to create an EventBus named default in the namespace. If you want to use a different name, or you want to have multiple EventBus objects in one namespace, you need to specify eventBusName in the spec of EventSource and Sensor correspondingly, so that they can find the right one. See EventSource spec and Sensor spec .","title":"EventBus"},
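For example, a sketch of an EventSource pointing at a non-default EventBus via eventBusName (the bus name is hypothetical):
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: webhook
spec:
  eventBusName: my-bus   # hypothetical EventBus name other than default
  webhook:
    example:
      port: "12000"
      endpoint: /example
      method: POST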
{"location":"eventbus/jetstream/","text":"Jetstream \u00b6 Jetstream is the latest streaming server implemented by the NATS community, with improvements from the original NATS Streaming (which will eventually be deprecated). The simplest Jetstream EventBus example: apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : jetstream : version : latest # Do NOT use \"latest\" but a specific version in your real deployment # See: https://argoproj.github.io/argo-events/eventbus/jetstream/#version The example above brings up a Jetstream StatefulSet with 3 replicas in the namespace. Properties \u00b6 Check here for the full spec of jetstream . version \u00b6 The version number specified in the example above is the release number for the NATS server. We support a subset of these versions, which we have tried out, and we only plan to upgrade them as needed. The list of available versions is managed by the controller manager ConfigMap, which can be updated to support new versions. kubectl get configmap argo-events-controller-config -o yaml Check here for a list of configurable features per version. A more involved example \u00b6 Another example with more configuration: apiVersion: argoproj.io/v1alpha1 kind: EventBus metadata: name: default spec: jetstream: version: latest # Do NOT use \"latest\" but a specific version in your real deployment replicas: 5 persistence: # optional storageClassName: standard accessMode: ReadWriteOnce volumeSize: 10Gi streamConfig: | # see default values in argo-events-controller-config maxAge: 24h settings: | max_file_store: 1GB # see default values in argo-events-controller-config startArgs: - \"-D\" # debug-level logs Security \u00b6 For Jetstream, TLS is turned on for all client-server communication as well as between Jetstream nodes. In addition, for client-server communication we use password authentication by default (and because TLS is turned on, the password is encrypted). How it works under the hood \u00b6 Jetstream has the concept of a Stream, and Subjects (i.e. topics) which are used on a Stream. From the documentation: \u201cEach Stream defines how messages are stored and what the limits (duration, size, interest) of the retention are.\u201d For Argo Events, we have one Stream called \"default\" with a single set of settings, but we have multiple subjects, each of which is named default.<eventSourceName>.<eventName> . Sensors subscribe to the subjects they need using durable consumers. Exotic \u00b6 To use an existing JetStream service, follow the example below. apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : jetstreamExotic : url : nats://xxxxx:xxx accessSecret : name : my-secret-name key : secret-key streamConfig : \"\"","title":"Jetstream"},{"location":"eventbus/jetstream/#jetstream","text":"Jetstream is the latest streaming server implemented by the NATS community, with improvements from the original NATS Streaming (which will eventually be deprecated). The simplest Jetstream EventBus example: apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : jetstream : version : latest # Do NOT use \"latest\" but a specific version in your real deployment # See: https://argoproj.github.io/argo-events/eventbus/jetstream/#version The example above brings up a Jetstream StatefulSet with 3 replicas in the namespace.","title":"Jetstream"},{"location":"eventbus/jetstream/#properties","text":"Check here for the full spec of jetstream .","title":"Properties"},{"location":"eventbus/jetstream/#version","text":"The version number specified in the example above is the release number for the NATS server. We support a subset of these versions, which we have tried out, and we only plan to upgrade them as needed. The list of available versions is managed by the controller manager ConfigMap, which can be updated to support new versions. kubectl get configmap argo-events-controller-config -o yaml Check here for a list of configurable features per version.","title":"version"},{"location":"eventbus/jetstream/#a-more-involved-example","text":"Another example with more configuration: apiVersion: argoproj.io/v1alpha1 kind: EventBus metadata: name: default spec: jetstream: version: latest # Do NOT use \"latest\" but a specific version in your real deployment replicas: 5 persistence: # optional storageClassName: standard accessMode: ReadWriteOnce volumeSize: 10Gi streamConfig: | # see default values in argo-events-controller-config maxAge: 24h settings: | max_file_store: 1GB # see default values in argo-events-controller-config startArgs: - \"-D\" # debug-level logs","title":"A more involved example"},{"location":"eventbus/jetstream/#security","text":"For Jetstream, TLS is turned on for all client-server communication as well as between Jetstream nodes. In addition, for client-server communication we use password authentication by default (and because TLS is turned on, the password is encrypted).","title":"Security"},{"location":"eventbus/jetstream/#how-it-works-under-the-hood","text":"Jetstream has the concept of a Stream, and Subjects (i.e. topics) which are used on a Stream. From the documentation: \u201cEach Stream defines how messages are stored and what the limits (duration, size, interest) of the retention are.\u201d For Argo Events, we have one Stream called \"default\" with a single set of settings, but we have multiple subjects, each of which is named default.<eventSourceName>.<eventName> . Sensors subscribe to the subjects they need using durable consumers.","title":"How it works under the hood"},{"location":"eventbus/jetstream/#exotic","text":"To use an existing JetStream service, follow the example below. apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : jetstreamExotic : url : nats://xxxxx:xxx accessSecret : name : my-secret-name key : secret-key streamConfig : \"\"","title":"Exotic"},
{"location":"eventbus/kafka/","text":"Kafka is a widely used event streaming platform. We recommend using Kafka if you have a lot of events and want to horizontally scale your Sensors. If you are looking to get started quickly with Argo Events, we recommend using Jetstream instead. When using a Kafka EventBus you must already have a Kafka cluster set up and topics created (unless you have auto create enabled, see topics below). Example \u00b6 apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : kafka : url : kafka:9092 # must be managed independently topic : \"example\" # optional See here for the full specification. Properties \u00b6 url \u00b6 Comma-separated list of kafka broker urls; the kafka broker must be managed independently of Argo Events. topic \u00b6 The topic name, defaults to {namespace-name}-{eventbus-name} . Two additional topics per Sensor are also required, see topics below for more information. version \u00b6 Kafka version; we recommend not manually setting this field in most circumstances. Defaults to the oldest supported stable version. tls \u00b6 Enables TLS on the kafka connection. tls: caCertSecret: name: my-secret key: ca-cert-key clientCertSecret: name: my-secret key: client-cert-key clientKeySecret: name: my-secret key: client-key-key sasl \u00b6 Enables SASL authentication on the kafka connection. sasl: mechanism: PLAIN passwordSecret: key: password name: my-user userSecret: key: user name: my-user consumerGroup.groupName \u00b6 Consumer group name, defaults to {namespace-name}-{sensor-name} . consumerGroup.rebalanceStrategy \u00b6 The kafka rebalance strategy; can be one of sticky, roundrobin, or range. Defaults to range. consumerGroup.startOldest \u00b6 When starting up a new group, whether to start from the oldest event (true) or the newest event (false). Defaults to false. Security \u00b6 You can enable TLS or SASL authentication; see above for configuration details. You must enable these features in your Kafka Cluster and make the certificates/credentials available in a Kubernetes secret. Topics \u00b6 The Kafka EventBus requires one event topic and two additional topics (trigger and action) per Sensor. These topics will not be created automatically unless the Kafka auto.create.topics.enable cluster configuration is set to true; otherwise it is your responsibility to create these topics. If a topic does not exist and cannot be automatically created, the EventSource and/or Sensor will exit with an error. If you want to take advantage of the horizontal scaling enabled by the Kafka EventBus, be sure to create topics with more than one partition. By default the topics are named as follows. topic name event {namespace}-{eventbus-name} trigger {namespace}-{eventbus-name}-{sensor-name}-trigger action {namespace}-{eventbus-name}-{sensor-name}-action If a topic name is specified in the EventBus specification, then the topics are named as follows. topic name event {spec.kafka.topic} trigger {spec.kafka.topic}-{sensor-name}-trigger action {spec.kafka.topic}-{sensor-name}-action Horizontal Scaling and Leader Election \u00b6 Sensors that use a Kafka EventBus can scale horizontally. Specifying replicas greater than one will result in all Sensor pods actively processing events. However, an EventSource that uses a Kafka EventBus cannot necessarily be horizontally scaled in an active-active manner; see EventSource HA for more details. In an active-passive scenario a Kubernetes leader election is used.","title":"Kafka"},
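Putting the properties above together, a sketch of a Kafka EventBus manifest; the broker list is hypothetical and the consumer group values are only illustrations:
apiVersion: argoproj.io/v1alpha1
kind: EventBus
metadata:
  name: default
spec:
  kafka:
    url: kafka-0:9092,kafka-1:9092   # hypothetical comma-separated broker list
    topic: example                   # optional
    consumerGroup:
      rebalanceStrategy: sticky      # one of sticky, roundrobin, range
      startOldest: true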
{"location":"eventbus/kafka/#example","text":"apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : kafka : url : kafka:9092 # must be managed independently topic : \"example\" # optional See here for the full specification.","title":"Example"},{"location":"eventbus/kafka/#properties","text":"","title":"Properties"},{"location":"eventbus/kafka/#url","text":"Comma-separated list of kafka broker urls; the kafka broker must be managed independently of Argo Events.","title":"url"},{"location":"eventbus/kafka/#topic","text":"The topic name, defaults to {namespace-name}-{eventbus-name} . Two additional topics per Sensor are also required, see topics below for more information.","title":"topic"},{"location":"eventbus/kafka/#version","text":"Kafka version; we recommend not manually setting this field in most circumstances. Defaults to the oldest supported stable version.","title":"version"},{"location":"eventbus/kafka/#tls","text":"Enables TLS on the kafka connection. tls: caCertSecret: name: my-secret key: ca-cert-key clientCertSecret: name: my-secret key: client-cert-key clientKeySecret: name: my-secret key: client-key-key","title":"tls"},{"location":"eventbus/kafka/#sasl","text":"Enables SASL authentication on the kafka connection.
sasl: mechanism: PLAIN passwordSecret: key: password name: my-user userSecret: key: user name: my-user","title":"sasl"},{"location":"eventbus/kafka/#consumergroupgroupname","text":"Consumer group name, defaults to {namespace-name}-{sensor-name} .","title":"consumerGroup.groupName"},{"location":"eventbus/kafka/#consumergrouprebalancestrategy","text":"The kafka rebalance strategy, can be one of: sticky, roundrobin, range. Defaults to range.","title":"consumerGroup.rebalanceStrategy"},{"location":"eventbus/kafka/#consumergroupstartoldest","text":"When starting up a new group do we want to start from the oldest event (true) or the newest event (false). Defaults to false","title":"consumerGroup.startOldest"},{"location":"eventbus/kafka/#security","text":"You can enable TLS or SASL authentication, see above for configuration details. You must enable these features in your Kafka Cluster and make the certifactes/credentials available in a Kubernetes secret.","title":"Security"},{"location":"eventbus/kafka/#topics","text":"The Kafka EventBus requires one event topic and two additional topics (trigger and action) per Sensor. These topics will not be created automatically unless the Kafka auto.create.topics.enable cluster configuration is set to true, otherwise it is your responsibility to create these topics. If a topic does not exist and cannot be automatically created, the EventSource and/or Sensor will exit with an error. If you want to take advantage of the horizontal scaling enabled by the Kafka EventBus be sure to create topics with more than one partition. By default the topics are named as follows. topic name event {namespace}-{eventbus-name} trigger {namespace}-{eventbus-name}-{sensor-name}-trigger action {namespace}-{eventbus-name}-{sensor-name}-action If a topic name is specified in the EventBus specification, then the topics are named as follows. topic name event {spec.kafka.topic} trigger {spec.kafka.topic}-{sensor-name}-trigger action {spec.kafka.topic}-{sensor-name}-action","title":"Topics"},{"location":"eventbus/kafka/#horizontal-scaling-and-leader-election","text":"Sensors that use a Kafka EventBus can scale horizontally. Specifiying replicas greater than one will result in all Sensor pods actively processing events. However, an EventSource that uses a Kafka EventBus cannot necessarily be horizontally scaled in an active-active manner, see EventSource HA for more details. In an active-passive scenario a Kubernetes leader election is used.","title":"Horizontal Scaling and Leader Election"},{"location":"eventbus/stan/","text":"NATS Streaming \u00b6 You can create a native NATS EventBus, or connect to an existing NATS Streaming service with exotic NATS EventBus. Native \u00b6 A simplest native NATS EventBus example: apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : nats : native : {} The example above brings up a NATS Streaming StatefulSet with 3 replicas in the namespace. The following example shows an EventBus with token auth strategy and persistent volumes. apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : nats : native : replicas : 3 # optional, defaults to 3, and requires minimal 3 auth : token # optional, default to none persistence : # optional storageClassName : standard accessMode : ReadWriteOnce volumeSize : 10Gi Properties \u00b6 Check here for the full spec of native . replicas - StatefulSet replicas, defaults to 3, and requires minimal 3. 
According to the NATS Streaming doc , the size should probably be limited to 3 to 5, and an odd number is recommended. auth - The strategy that clients use to connect to the NATS Streaming service, none or token is currently supported, defaults to none . If the token strategy is used, the system will generate a token and store it in K8s secrets (one for client, one for server), EventSource and Sensor PODs will automatically load the client secret and use it to connect to the EventBus. antiAffinity - Whether to create the StatefulSet PODs with an anti-affinity rule. Deprecated in v1.3 , will be removed in v1.5 , use affinity instead. nodeSelector - Node selector for StatefulSet PODs. tolerations - Tolerations for the PODs. persistence - Whether to use a persistent volume for the data. securityContext - POD level security attributes and common container settings. maxAge - Max Age of existing messages, i.e. 72h , 4h35m , defaults to 72h . maxMsgs - Max number of messages before expiring the oldest messages, 0 means unlimited. Defaults to 1000000. maxBytes - Total size of messages before expiring the oldest messages, 0 means unlimited. Defaults to 1GB. maxSubs - Maximum number of subscriptions, 0 means unlimited. Defaults to 1000. maxPayload - Maximum number of bytes in a message payload, 0 means unlimited. Defaults to 1MB. imagePullSecrets - Secrets used to pull images. serviceAccountName - In case your firm requires using a service account other than default . priority - Priority of the StatefulSet PODs. priorityClassName - PriorityClassName of the StatefulSet PODs. affinity - Affinity settings for the StatefulSet PODs. More About Native NATS EventBus \u00b6 The message limit per channel defaults to 1,000,000. It can be customized by setting spec.nats.native.maxMsgs , 0 means unlimited. Message bytes per channel default to 1GB ; set spec.nats.native.maxBytes to customize it, \"0\" means unlimited. Max age of messages is 72 hours, which means messages over 72 hours will be deleted automatically. It can be customized by setting spec.nats.native.maxAge , i.e. 240h . Max subscription number defaults to 1000 ; it can be customized by setting spec.nats.native.maxSubs . Exotic \u00b6 To use an existing NATS Streaming service, follow the example below. apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : nats : exotic : url : nats://xxxxx:xxx clusterID : cluster-id auth : token accessSecret : name : my-secret-name key : secret-key More Information \u00b6 To view a finalized EventBus config: kubectl get eventbus default -o json | jq '.status.config' A sample result: { \"nats\" : { \"accessSecret\" : { \"key\" : \"client-auth\" , \"name\" : \"eventbus-default-client\" }, \"auth\" : \"token\" , \"clusterID\" : \"eventbus-default\" , \"url\" : \"nats://eventbus-default-stan-svc:4222\" } } All the events in a namespace are published to the same channel/subject/topic named eventbus-{namespace} in the EventBus.","title":"Stan"},{"location":"eventbus/stan/#nats-streaming","text":"You can create a native NATS EventBus, or connect to an existing NATS Streaming service with an exotic NATS EventBus.","title":"NATS Streaming"},{"location":"eventbus/stan/#native","text":"The simplest native NATS EventBus example: apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : nats : native : {} The example above brings up a NATS Streaming StatefulSet with 3 replicas in the namespace. The following example shows an EventBus with token auth strategy and persistent volumes. 
apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : nats : native : replicas : 3 # optional, defaults to 3, and requires a minimum of 3 auth : token # optional, defaults to none persistence : # optional storageClassName : standard accessMode : ReadWriteOnce volumeSize : 10Gi","title":"Native"},{"location":"eventbus/stan/#properties","text":"Check here for the full spec of native . replicas - StatefulSet replicas, defaults to 3, and requires a minimum of 3. According to the NATS Streaming doc , the size should probably be limited to 3 to 5, and an odd number is recommended. auth - The strategy that clients use to connect to the NATS Streaming service, none or token is currently supported, defaults to none . If the token strategy is used, the system will generate a token and store it in K8s secrets (one for client, one for server), EventSource and Sensor PODs will automatically load the client secret and use it to connect to the EventBus. antiAffinity - Whether to create the StatefulSet PODs with an anti-affinity rule. Deprecated in v1.3 , will be removed in v1.5 , use affinity instead. nodeSelector - Node selector for StatefulSet PODs. tolerations - Tolerations for the PODs. persistence - Whether to use a persistent volume for the data. securityContext - POD level security attributes and common container settings. maxAge - Max Age of existing messages, i.e. 72h , 4h35m , defaults to 72h . maxMsgs - Max number of messages before expiring the oldest messages, 0 means unlimited. Defaults to 1000000. maxBytes - Total size of messages before expiring the oldest messages, 0 means unlimited. Defaults to 1GB. maxSubs - Maximum number of subscriptions, 0 means unlimited. Defaults to 1000. maxPayload - Maximum number of bytes in a message payload, 0 means unlimited. Defaults to 1MB. imagePullSecrets - Secrets used to pull images. serviceAccountName - In case your firm requires using a service account other than default . priority - Priority of the StatefulSet PODs. priorityClassName - PriorityClassName of the StatefulSet PODs. affinity - Affinity settings for the StatefulSet PODs.","title":"Properties"},{"location":"eventbus/stan/#more-about-native-nats-eventbus","text":"The message limit per channel defaults to 1,000,000. It can be customized by setting spec.nats.native.maxMsgs , 0 means unlimited. Message bytes per channel default to 1GB ; set spec.nats.native.maxBytes to customize it, \"0\" means unlimited. Max age of messages is 72 hours, which means messages over 72 hours will be deleted automatically. It can be customized by setting spec.nats.native.maxAge , i.e. 240h . Max subscription number defaults to 1000 ; it can be customized by setting spec.nats.native.maxSubs .","title":"More About Native NATS EventBus"},{"location":"eventbus/stan/#exotic","text":"To use an existing NATS Streaming service, follow the example below. 
apiVersion : argoproj.io/v1alpha1 kind : EventBus metadata : name : default spec : nats : exotic : url : nats://xxxxx:xxx clusterID : cluster-id auth : token accessSecret : name : my-secret-name key : secret-key","title":"Exotic"},{"location":"eventbus/stan/#more-information","text":"To view a finalized EventBus config: kubectl get eventbus default -o json | jq '.status.config' A sample result: { \"nats\" : { \"accessSecret\" : { \"key\" : \"client-auth\" , \"name\" : \"eventbus-default-client\" }, \"auth\" : \"token\" , \"clusterID\" : \"eventbus-default\" , \"url\" : \"nats://eventbus-default-stan-svc:4222\" } } All the events in a namespace are published to the same channel/subject/topic named eventbus-{namespace} in the EventBus.","title":"More Information"},{"location":"eventsources/calendar-catch-up/","text":"Calendar EventSource Catch Up \u00b6 The catch-up feature allows Calendar eventsources to execute the missed schedules from the last run. Enable Catch-up for Calendar EventSource \u00b6 Users can configure catch-up for each event in an eventsource. apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : calendar spec : template : serviceAccountName : configmap-sa # assign a service account with read, write permissions on configmaps calendar : example-with-catch-up : # Catch up the missed events from the last event timestamp. The last event will be persisted in the configmap. 
schedule : \"* * * * *\" persistence : catchup : enabled : true # Check missed schedules from last persisted event time on every start maxDuration : 5m # maximum amount of duration go back for the catch-up configMap : # Configmap for persist the last successful event timestamp createIfNotExist : true name : test-configmap Last calender event persisted in configured configmap. Same configmap can be used by multiple events configuration. data : calendar.example-with-catch-up : '{\"eventTime\":\"2020-10-19 22:50:00.0003192 +0000 UTC m=+683.567066901\"}'","title":"Enable Catch-up for Calendar EventSource"},{"location":"eventsources/calendar-catch-up/#service-account","text":"To make Calendar EventSource catch-up work, a Service Account with proper RBAC settings needs to be provided. If the configMap is not existing, and createIfNotExist: true is set, a Service Account bound with following Role is required. apiVersion : rbac.authorization.k8s.io/v1 kind : Role metadata : name : example-configmap-access-role rules : - apiGroups : - \"\" resources : - configmaps verbs : - get - create - update If the configmap is already existing, create can be removed from the verbs list.","title":"Service Account"},{"location":"eventsources/calendar-catch-up/#disable-the-catchup","text":"Set false to catchup-->enabled element catchup : enabled : false","title":"Disable the catchup"},{"location":"eventsources/filtering/","text":"Filtering EventSources \u00b6 When event sources watch events from external data sources (ie. Kafka topics), it will ingest all messages. With filtering, we are able to apply constraints and determine if the event should be published or skipped. This is achieved by evaluating an expression in the EventSource spec. Fields \u00b6 A filter in an example Kafka EventSource: apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : kafka spec : kafka : example : url : kafka.argo-events:9092 topic : topic-2 jsonBody : true partition : \"1\" filter : # filter field expression : \"(body.id == 4) && (body.name != 'Joe')\" #expression to be evaluated connectionBackoff : duration : 10s steps : 5 factor : 2 jitter : 0.2 The expression string is evaluated with the expr package which offers a wide set of basic operators and comparators. 
Example \u00b6 Creating a Kafka EventSource with the filter field present kubectl apply -f examples/event-sources/kafka.yaml -n argo-events Sending an event with passing filter conditions to kafka echo '{\"id\": 4,\"name\": \"John\", \"email\": \"john@intuit.com\", \"department\":{\"id\": 1,\"name\": \"HR\",\"bu\":{\"id\": 2,\"name\" : \"devp\"}}}' | kcat -b localhost:9092 -P -t topic-2 Sending an event with failing filter conditions echo '{\"id\": 2,\"name\": \"Johnson\", \"email\": \"john@intuit.com\", \"department\":{\"id\": 1,\"name\": \"HR\",\"bu\":{\"id\": 2,\"name\" : \"devp\"}}}' | kcat -b localhost:9092 -P -t topic-2 Output \u00b6 Successful logs from kafka event source pod: {\"level\":\"info\",\"ts\":1644017495.0711913,\"logger\":\"argo-events.eventsource\",\"caller\":\"kafka/start.go:217\",\"msg\":\"dispatching event on the data channel...\",\"eventSourceName\":\"kafka\",\"eventSourceType\":\"kafka\",\"eventName\":\"example\",\"partition-id\":\"0\"} {\"level\":\"info\",\"ts\":1644017495.1374986,\"logger\":\"argo-events.eventsource\",\"caller\":\"eventsources/eventing.go:514\",\"msg\":\"succeeded to publish an event\",\"eventSourceName\":\"kafka\",\"eventName\":\"example\",\"eventSourceType\":\"kafka\",\"eventID\":\"kafka:example:kafka-broker:9092:topic-2:0:7\"}","title":"Filtering EventSources"},{"location":"eventsources/filtering/#filtering-eventsources","text":"When event sources watch events from external data sources (i.e. Kafka topics), they will ingest all messages. With filtering, we are able to apply constraints and determine if the event should be published or skipped. This is achieved by evaluating an expression in the EventSource spec.","title":"Filtering EventSources"},{"location":"eventsources/filtering/#fields","text":"A filter in an example Kafka EventSource: apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : kafka spec : kafka : example : url : kafka.argo-events:9092 topic : topic-2 jsonBody : true partition : \"1\" filter : # filter field expression : \"(body.id == 4) && (body.name != 'Joe')\" # expression to be evaluated connectionBackoff : duration : 10s steps : 5 factor : 2 jitter : 0.2 The expression string is evaluated with the expr package which offers a wide set of basic operators and comparators.","title":"Fields"},{"location":"eventsources/filtering/#example","text":"Creating a Kafka EventSource with the filter field present kubectl apply -f examples/event-sources/kafka.yaml -n argo-events Sending an event with passing filter conditions to kafka echo '{\"id\": 4,\"name\": \"John\", \"email\": \"john@intuit.com\", \"department\":{\"id\": 1,\"name\": \"HR\",\"bu\":{\"id\": 2,\"name\" : \"devp\"}}}' | kcat -b localhost:9092 -P -t topic-2 Sending an event with failing filter conditions echo '{\"id\": 2,\"name\": \"Johnson\", \"email\": \"john@intuit.com\", \"department\":{\"id\": 1,\"name\": \"HR\",\"bu\":{\"id\": 2,\"name\" : \"devp\"}}}' | kcat -b localhost:9092 -P -t topic-2","title":"Example"},{"location":"eventsources/filtering/#output","text":"Successful logs from kafka event source pod: {\"level\":\"info\",\"ts\":1644017495.0711913,\"logger\":\"argo-events.eventsource\",\"caller\":\"kafka/start.go:217\",\"msg\":\"dispatching event on the data channel...\",\"eventSourceName\":\"kafka\",\"eventSourceType\":\"kafka\",\"eventName\":\"example\",\"partition-id\":\"0\"} {\"level\":\"info\",\"ts\":1644017495.1374986,\"logger\":\"argo-events.eventsource\",\"caller\":\"eventsources/eventing.go:514\",\"msg\":\"succeeded to publish an 
event\",\"eventSourceName\":\"kafka\",\"eventName\":\"example\",\"eventSourceType\":\"kafka\",\"eventID\":\"kafka:example:kafka-broker:9092:topic-2:0:7\"}","title":"Output"},{"location":"eventsources/gcp-pubsub/","text":"GCP PubSub \u00b6 Topic And Subscription ID \u00b6 GCP PubSub event source can listen to a PubSub with given topic , or subscriptionID . Here is the logic with different topic and subscriptionID combination. Topic Provided/Existing Sub ID Provided/Existing Actions Yes/Yes Yes/Yes Validate if given topic matches subscription's topic Yes/Yes Yes/No Create a subscription with given ID Yes/Yes No/- Create or re-use subscription with auto generated subID Yes/No Yes/No Create a topic and a subscription with given subID Yes/No Yes/Yes Invalid Yes/No No/- Create a topic, create or re-use subscription w/ auto generated subID No/- Yes/Yes OK No/- Yes/No Invalid Workload Identity \u00b6 If you have configured Workload Identity and want to use it for a PubSub EventSource, leave credentialSecret nil. Full spec is available here . See a PubSub EventSource example . Running With PubSub Emulator \u00b6 You can point this event source at the PubSub Emulator by configuring the PUBSUB_EMULATOR_HOST environment variable for the event source pod. This can be configured on the EventSource resource under the spec.template.container.env key. This option is also documented in the PubSub EventSource example .","title":"GCP PubSub"},{"location":"eventsources/gcp-pubsub/#gcp-pubsub","text":"","title":"GCP PubSub"},{"location":"eventsources/gcp-pubsub/#topic-and-subscription-id","text":"GCP PubSub event source can listen to a PubSub with given topic , or subscriptionID . Here is the logic with different topic and subscriptionID combination. Topic Provided/Existing Sub ID Provided/Existing Actions Yes/Yes Yes/Yes Validate if given topic matches subscription's topic Yes/Yes Yes/No Create a subscription with given ID Yes/Yes No/- Create or re-use subscription with auto generated subID Yes/No Yes/No Create a topic and a subscription with given subID Yes/No Yes/Yes Invalid Yes/No No/- Create a topic, create or re-use subscription w/ auto generated subID No/- Yes/Yes OK No/- Yes/No Invalid","title":"Topic And Subscription ID"},{"location":"eventsources/gcp-pubsub/#workload-identity","text":"If you have configured Workload Identity and want to use it for a PubSub EventSource, leave credentialSecret nil. Full spec is available here . See a PubSub EventSource example .","title":"Workload Identity"},{"location":"eventsources/gcp-pubsub/#running-with-pubsub-emulator","text":"You can point this event source at the PubSub Emulator by configuring the PUBSUB_EMULATOR_HOST environment variable for the event source pod. This can be configured on the EventSource resource under the spec.template.container.env key. This option is also documented in the PubSub EventSource example .","title":"Running With PubSub Emulator"},{"location":"eventsources/generic/","text":"Generic EventSource \u00b6 Generic eventsource extends Argo-Events eventsources via a simple gRPC contract. This is specifically useful when you want to onboard a custom eventsource implementation. Contract \u00b6 In order to qualify as generic eventsource, the eventsource server needs to implement following gRPC contract. syntax = \"proto3\" ; package generic ; service Eventing { rpc StartEventSource ( EventSource ) returns ( stream Event ); } message EventSource { // The event source name . string name = 1 ; // The event source configuration value . 
bytes config = 2 ; } /** * Represents an event */ message Event { // The event source name . string name = 1 ; // The event payload . bytes payload = 2 ; } The proto file is available here . Architecture \u00b6 Consider a generic eventsource. apiVersion: argoproj.io/v1alpha1 kind: EventSource metadata: name: generic spec: generic: example: insecure: true url: \"generic-event-source-server.argo-events.svc:8080\" config: |- key1: value1 key2: value2 The values placed under the config field follow a free-form style and the Argo-Events eventsource client is not opinionated about them. However, it is expected that the eventsource server implemented by the user is able to parse the configuration. Flow \u00b6 The eventsource client connects to the server via the url defined under the eventsource spec and sends over the configuration defined under config over an RPC call. The eventsource server then parses the configuration and connects to any external source if required to consume the events. The eventsource server can produce events without connecting to any external source, e.g. a special implementation of calendar events. The events from the eventsource server are streamed back to the client. The client then writes the events to the eventbus which are read by the sensor to trigger the workflows. Connection Strategy \u00b6 The eventsource client performs indefinite retries to connect to the eventsource server and receives events over a stream upon successful connection. This also applies when the eventsource server goes down.","title":"Generic EventSource"},{"location":"eventsources/generic/#generic-eventsource","text":"Generic eventsource extends Argo-Events eventsources via a simple gRPC contract. This is specifically useful when you want to onboard a custom eventsource implementation.","title":"Generic EventSource"},{"location":"eventsources/generic/#contract","text":"In order to qualify as a generic eventsource, the eventsource server needs to implement the following gRPC contract. syntax = \"proto3\" ; package generic ; service Eventing { rpc StartEventSource ( EventSource ) returns ( stream Event ); } message EventSource { // The event source name . string name = 1 ; // The event source configuration value . bytes config = 2 ; } /** * Represents an event */ message Event { // The event source name . string name = 1 ; // The event payload . bytes payload = 2 ; } The proto file is available here .","title":"Contract"},{"location":"eventsources/generic/#architecture","text":"Consider a generic eventsource. apiVersion: argoproj.io/v1alpha1 kind: EventSource metadata: name: generic spec: generic: example: insecure: true url: \"generic-event-source-server.argo-events.svc:8080\" config: |- key1: value1 key2: value2 The values placed under the config field follow a free-form style and the Argo-Events eventsource client is not opinionated about them. However, it is expected that the eventsource server implemented by the user is able to parse the configuration.","title":"Architecture"},{"location":"eventsources/generic/#flow","text":"The eventsource client connects to the server via the url defined under the eventsource spec and sends over the configuration defined under config over an RPC call. The eventsource server then parses the configuration and connects to any external source if required to consume the events. The eventsource server can produce events without connecting to any external source, e.g. a special implementation of calendar events. The events from the eventsource server are streamed back to the client. 
The client then writes the events to the eventbus which are read by the sensor to trigger the workflows.","title":"Flow"},{"location":"eventsources/generic/#connection-strategy","text":"The eventsource client performs indefinite retries to connect to the eventsource server and receives events over a stream upon successful connection. This also applies when the eventsource server goes down.","title":"Connection Strategy"},{"location":"eventsources/ha/","text":"EventSource High Availability \u00b6 The EventSource controller creates a k8s deployment (replica number defaults to 1) for each EventSource object to watch the events. HA can be achieved by setting spec.replicas to a number greater than 1. Some types of event sources do not allow multiple live clients with the same attributes (i.e. multiple clients with the same clientID connecting to a NATS server), or multiple event source PODs will generate duplicated events downstream, so the HA strategies are different for different event sources. Please DO NOT manually scale up the replicas, as that might cause unexpected behaviors! Active-Active \u00b6 The Active-Active strategy is applied to the following EventSource types. 
AWS SNS AWS SQS Bitbucket Bitbucket Server GitHub GitLab NetApp Storage GRID Slack Stripe Webhook When spec.replicas is set to N (N > 1), all the N Pods serve traffic. Active-Passive \u00b6 If the following EventSource types have spec.replicas > 1 , the Active-Passive strategy is used, which means only one Pod serves traffic and the rest stand by. One of the standby Pods will be automatically elected to be active if the old one is gone. AMQP Azure Events Hub Calendar Emitter GCP PubSub Generic File HDFS Kafka Minio MQTT NATS NSQ Pulsar Redis Resource Kubernetes Leader Election \u00b6 By default, Argo Events will use NATS for the HA leader election except when using a Kafka EventBus, in which case a Kubernetes leader election will be used. If using a different EventBus you can opt-in to a Kubernetes native leader election by specifying the following annotation. annotations : events.argoproj.io/leader-election : k8s To use Kubernetes leader election the following RBAC rules need to be associated with the EventSource ServiceAccount. apiVersion : rbac.authorization.k8s.io/v1 kind : Role metadata : name : argo-events-leaderelection-role rules : - apiGroups : [ \"coordination.k8s.io\" ] resources : [ \"leases\" ] verbs : [ \"get\" , \"create\" , \"update\" ] More \u00b6 Click here to learn more about Argo Events DR/HA recommendations.","title":"EventSource High Availability"},{"location":"eventsources/ha/#eventsource-high-availability","text":"The EventSource controller creates a k8s deployment (replica number defaults to 1) for each EventSource object to watch the events. HA can be achieved by setting spec.replicas to a number greater than 1. Some types of event sources do not allow multiple live clients with the same attributes (i.e. multiple clients with the same clientID connecting to a NATS server), or multiple event source PODs will generate duplicated events downstream, so the HA strategies are different for different event sources. Please DO NOT manually scale up the replicas, as that might cause unexpected behaviors!","title":"EventSource High Availability"},{"location":"eventsources/ha/#active-active","text":"The Active-Active strategy is applied to the following EventSource types. AWS SNS AWS SQS Bitbucket Bitbucket Server GitHub GitLab NetApp Storage GRID Slack Stripe Webhook When spec.replicas is set to N (N > 1), all the N Pods serve traffic.","title":"Active-Active"},{"location":"eventsources/ha/#active-passive","text":"If the following EventSource types have spec.replicas > 1 , the Active-Passive strategy is used, which means only one Pod serves traffic and the rest stand by. One of the standby Pods will be automatically elected to be active if the old one is gone. AMQP Azure Events Hub Calendar Emitter GCP PubSub Generic File HDFS Kafka Minio MQTT NATS NSQ Pulsar Redis Resource","title":"Active-Passive"},{"location":"eventsources/ha/#kubernetes-leader-election","text":"By default, Argo Events will use NATS for the HA leader election except when using a Kafka EventBus, in which case a Kubernetes leader election will be used. If using a different EventBus you can opt-in to a Kubernetes native leader election by specifying the following annotation. annotations : events.argoproj.io/leader-election : k8s To use Kubernetes leader election the following RBAC rules need to be associated with the EventSource ServiceAccount. apiVersion : rbac.authorization.k8s.io/v1 kind : Role metadata : name : argo-events-leaderelection-role rules : - apiGroups : [ \"coordination.k8s.io\" ] resources : [ \"leases\" ] verbs : [ \"get\" , \"create\" , \"update\" ]","title":"Kubernetes Leader Election"},{"location":"eventsources/ha/#more","text":"Click here to learn more about Argo Events DR/HA recommendations.","title":"More"},{"location":"eventsources/multiple-events/","text":"EventSource With Multiple Events \u00b6 v0.17.0 and after Multiple events can be configured in a single EventSource; they can be either one event source type, or mixed event source types with some limitations. Single EventSource Type \u00b6 A single type EventSource configuration: apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook spec : webhook : example : port : \"12000\" endpoint : /example method : POST example-foo : port : \"13000\" endpoint : /example2 method : POST For the example above, there are 2 events configured in the EventSource named webhook . Mixed EventSource Types \u00b6 An EventSource is allowed to have mixed types of events configured. apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : mixed-sources spec : webhook : webhook-example : # eventName port : \"12000\" endpoint : /example method : POST sns : sns-example : # eventName topicArn : arn:aws:sns:us-east-1:XXXXXXXX:test webhook : endpoint : \"/\" port : \"15000\" accessKey : key : my-key name : my-name secretKey : key : my-secret-key name : my-secret-name region : us-east-1 However, there are some rules you need to follow to do it: EventSource types with the Active-Active HA strategy cannot be mixed with types with the Active-Passive strategy, for EventSource types, see EventSource High Availability for the details. Event Name (i.e. webhook-example and sns-example above, refer to EventSource Names ) needs to be unique in the EventSource, the same eventName is not allowed even if they are in different event source types. 
The reason for that is that we use eventSourceName and eventName as the dependency attributes in a Sensor.","title":"EventSource With Multiple Events"},{"location":"eventsources/multiple-events/#eventsource-with-multiple-events","text":"v0.17.0 and after Multiple events can be configured in a single EventSource; they can be either one event source type, or mixed event source types with some limitations.","title":"EventSource With Multiple Events"},{"location":"eventsources/multiple-events/#single-eventsource-type","text":"A single type EventSource configuration: apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook spec : webhook : example : port : \"12000\" endpoint : /example method : POST example-foo : port : \"13000\" endpoint : /example2 method : POST For the example above, there are 2 events configured in the EventSource named webhook .","title":"Single EventSource Type"},{"location":"eventsources/multiple-events/#mixed-eventsource-types","text":"An EventSource is allowed to have mixed types of events configured. apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : mixed-sources spec : webhook : webhook-example : # eventName port : \"12000\" endpoint : /example method : POST sns : sns-example : # eventName topicArn : arn:aws:sns:us-east-1:XXXXXXXX:test webhook : endpoint : \"/\" port : \"15000\" accessKey : key : my-key name : my-name secretKey : key : my-secret-key name : my-secret-name region : us-east-1 However, there are some rules you need to follow to do it: EventSource types with the Active-Active HA strategy cannot be mixed with types with the Active-Passive strategy, for EventSource types, see EventSource High Availability for the details. Event Name (i.e. webhook-example and sns-example above, refer to EventSource Names ) needs to be unique in the EventSource, the same eventName is not allowed even if they are in different event source types. The reason for that is that we use eventSourceName and eventName as the dependency attributes in a Sensor.","title":"Mixed EventSource Types"},{"location":"eventsources/naming/","text":"EventSource Names \u00b6 In a Sensor object, a dependency is defined as: dependencies : - name : test-dep eventSourceName : webhook-example eventName : example The eventSourceName and eventName might be confusing. Take the following EventSource example; the eventSourceName and eventName are described below. 
apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook-example # eventSourceName spec : webhook : example : # eventName port : \"12000\" endpoint : /example method : POST example-foo : # eventName port : \"13000\" endpoint : /example2 method : POST EventSourceName \u00b6 eventSourceName is the name of the dependent EventSource object, i.e. webhook-example in the example above. EventName \u00b6 eventName is the map key of a configured event. In the example above, eventName could be example or example-foo .","title":"EventSource Names"},{"location":"eventsources/naming/#eventsource-names","text":"In a Sensor object, a dependency is defined as: dependencies : - name : test-dep eventSourceName : webhook-example eventName : example The eventSourceName and eventName might be confusing. Take the following EventSource example; the eventSourceName and eventName are described below. apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook-example # eventSourceName spec : webhook : example : # eventName port : \"12000\" endpoint : /example method : POST example-foo : # eventName port : \"13000\" endpoint : /example2 method : POST","title":"EventSource Names"},{"location":"eventsources/naming/#eventsourcename","text":"eventSourceName is the name of the dependent EventSource object, i.e. webhook-example in the example above.","title":"EventSourceName"},{"location":"eventsources/naming/#eventname","text":"eventName is the map key of a configured event. In the example above, eventName could be example or example-foo .","title":"EventName"},{"location":"eventsources/services/","text":"EventSource Services \u00b6 Some of the EventSources ( webhook , github , gitlab , sns , slack , Storage GRID and stripe ) start an HTTP service to receive the events. For your convenience, there is a field named service within the EventSource spec that can help you create a ClusterIP service for testing. For example: apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook spec : service : ports : - port : 12000 targetPort : 12000 webhook : example : port : \"12000\" endpoint : /example method : POST However, the generated service is ONLY for testing purposes; if you want to expose the endpoint for external access, please manage it by using native K8s objects (i.e. a Load Balancer type Service, or an Ingress), and remove the service field from the EventSource object. For example, you can create a K8s service with the selector eventsource-name: webhook to select pods created for the \"webhook\" event source, like the following: apiVersion : v1 kind : Service metadata : name : webhook-eventsource spec : ports : - port : 12000 protocol : TCP targetPort : 12000 selector : eventsource-name : webhook type : NodePort Then you can expose the service for external access using native K8s objects as mentioned above. You can refer to webhook health check if you need a health check endpoint for LB Service or Ingress configuration.","title":"EventSource Services"},{"location":"eventsources/services/#eventsource-services","text":"Some of the EventSources ( webhook , github , gitlab , sns , slack , Storage GRID and stripe ) start an HTTP service to receive the events. For your convenience, there is a field named service within the EventSource spec that can help you create a ClusterIP service for testing. For example: apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook spec : service : ports : - port : 12000 targetPort : 12000 webhook : example : port : \"12000\" endpoint : /example method : POST However, the generated service is ONLY for testing purposes; if you want to expose the endpoint for external access, please manage it by using native K8s objects (i.e. a Load Balancer type Service, or an Ingress), and remove the service field from the EventSource object. For example, you can create a K8s service with the selector eventsource-name: webhook to select pods created for the \"webhook\" event source, like the following: apiVersion : v1 kind : Service metadata : name : webhook-eventsource spec : ports : - port : 12000 protocol : TCP targetPort : 12000 selector : eventsource-name : webhook type : NodePort Then you can expose the service for external access using native K8s objects as mentioned above. 
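Once the service is exposed, a quick way to verify the endpoint is reachable is a small client call. The sketch below is illustrative only: it assumes the webhook EventSource above is reachable at localhost:12000 (for example via kubectl port-forward svc/webhook-eventsource 12000:12000); the host and port are assumptions, not part of this guide.

import urllib.request

req = urllib.request.Request(
    "http://localhost:12000/example",  # the endpoint configured in the EventSource
    data=b'{"message": "hello"}',
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    # Expect a 200 response, and the event to appear on the eventbus.
    print(resp.status, resp.read())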
You can refer to webhook health check if you need a health check endpoint for LB Service or Ingress configuration.","title":"EventSource Services"},{"location":"eventsources/webhook-authentication/","text":"Webhook Authentication \u00b6 v1.0 and after For the webhook event source, if you want to get your endpoint protected from unauthorized access, you can specify authSecret to the spec, which is a K8s secret key selector. This simple authentication approach also works for webhook extended event sources, if that event source does not have a built-in authenticator. Firstly, create a k8s secret containing your token. echo -n 'af3qqs321f2ddwf1e2e67dfda3fs' > ./token.txt kubectl create secret generic my-webhook-token --from-file = my-token = ./token.txt Then add authSecret to your webhook EventSource. apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook spec : webhook : example : port : \"12000\" endpoint : /example method : POST authSecret : name : my-webhook-token key : my-token Now you can authenticate your webhook endpoint with the configured token. TOKEN = \"Bearer af3qqs321f2ddwf1e2e67dfda3fs\" curl -X POST -H \"Authorization: $TOKEN \" -d \"{your data}\" http://xxxxx:12000/example","title":"Webhook Authentication"},{"location":"eventsources/webhook-authentication/#webhook-authentication","text":"v1.0 and after For the webhook event source, if you want to get your endpoint protected from unauthorized access, you can specify authSecret to the spec, which is a K8s secret key selector. This simple authentication approach also works for webhook extended event sources, if that event source does not have a built-in authenticator. Firstly, create a k8s secret containing your token. echo -n 'af3qqs321f2ddwf1e2e67dfda3fs' > ./token.txt kubectl create secret generic my-webhook-token --from-file = my-token = ./token.txt Then add authSecret to your webhook EventSource. apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook spec : webhook : example : port : \"12000\" endpoint : /example method : POST authSecret : name : my-webhook-token key : my-token Now you can authenticate your webhook endpoint with the configured token. TOKEN = \"Bearer af3qqs321f2ddwf1e2e67dfda3fs\" curl -X POST -H \"Authorization: $TOKEN \" -d \"{your data}\" http://xxxxx:12000/example","title":"Webhook Authentication"},{"location":"eventsources/webhook-health-check/","text":"Webhook Health Check \u00b6 For webhook or webhook extended event sources such as github , gitlab , sns , slack , Storage GRID and stripe , besides the endpoint configured in the spec, an extra endpoint :${port}/health will also be created; this is useful for LB or Ingress configuration for the event source, where usually a health check endpoint is required. For example, the following EventSource object will have 4 endpoints created, :12000/example1 , :12000/health , :13000/example2 and :13000/health . An HTTP GET request to the health endpoint returns a text OK with HTTP response code 200 . 
apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook spec : webhook : example : port : \"12000\" endpoint : /example1 method : POST example-foo : port : \"13000\" endpoint : /example2 method : POST","title":"Webhook Health Check"},{"location":"eventsources/webhook-health-check/#webhook-health-check","text":"For webhook or webhook extended event sources such as github , gitlab , sns , slack , Storage GRID and stripe , besides the endpoint configured in the spec, an extra endpoint :${port}/health will also be created; this is useful for LB or Ingress configuration for the event source, where usually a health check endpoint is required. For example, the following EventSource object will have 4 endpoints created, :12000/example1 , :12000/health , :13000/example2 and :13000/health . An HTTP GET request to the health endpoint returns a text OK with HTTP response code 200 . apiVersion : argoproj.io/v1alpha1 kind : EventSource metadata : name : webhook spec : webhook : example : port : \"12000\" endpoint : /example1 method : POST example-foo : port : \"13000\" endpoint : /example2 method : POST","title":"Webhook Health Check"},{"location":"eventsources/setup/amqp/","text":"AMQP \u00b6 AMQP event-source listens to messages on the MQ and helps sensors trigger the workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"contentType\" : \"ContentType is the MIME content type\" , \"contentEncoding\" : \"ContentEncoding is the MIME content encoding\" , \"deliveryMode\" : \"Delivery mode can be either - non-persistent (1) or persistent (2)\" , \"priority\" : \"Priority refers to the use - 0 to 9\" , \"correlationId\" : \"CorrelationId is the correlation identifier\" , \"replyTo\" : \"ReplyTo is the address to reply to (ex: RPC)\" , \"expiration\" : \"Expiration refers to message expiration spec\" , \"messageId\" : \"MessageId is message identifier\" , \"timestamp\" : \"Timestamp refers to the message timestamp\" , \"type\" : \"Type refers to the message type name\" , \"appId\" : \"AppId refers to the application id\" , \"exchange\" : \"Exchange is basic.publish exchange\" , \"routingKey\" : \"RoutingKey is basic.publish routing key\" , \"body\" : \"Body represents the message body\" } } Setup \u00b6 Let's set up RabbitMQ locally. apiVersion : v1 kind : Service metadata : labels : component : rabbitmq name : rabbitmq - service spec : ports : - port : 5672 selector : app : taskQueue component : rabbitmq --- apiVersion : v1 kind : ReplicationController metadata : labels : component : rabbitmq name : rabbitmq - controller spec : replicas : 1 template : metadata : labels : app : taskQueue component : rabbitmq spec : containers : - image : rabbitmq name : rabbitmq ports : - containerPort : 5672 resources : limits : cpu : 100 m Make sure the RabbitMQ controller pod is up and running before proceeding further. Expose the RabbitMQ server to the local publisher using port-forward . kubectl -n argo-events port-forward <rabbitmq-pod-name> 5672:5672 Create the event source by running the following command. 
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/amqp.yaml Inspect the event-source pod logs to make sure it was able to subscribe to the exchange specified in the event source to consume messages. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/amqp.yaml Let's set up a RabbitMQ publisher. If you don't have pika installed, run. python -m pip install pika --upgrade Open a python REPL and run the following code to publish a message on the exchange called test . import pika connection = pika . BlockingConnection ( pika . ConnectionParameters ( 'localhost' )) channel = connection . channel () channel . basic_publish ( exchange = 'test' , routing_key = 'hello' , body = '{\"message\": \"hello\"}' ) As soon as you publish a message, the sensor will trigger an Argo workflow. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"AMQP"},{"location":"eventsources/setup/amqp/#amqp","text":"AMQP event-source listens to messages on the MQ and helps sensors trigger the workloads.","title":"AMQP"},{"location":"eventsources/setup/amqp/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"contentType\" : \"ContentType is the MIME content type\" , \"contentEncoding\" : \"ContentEncoding is the MIME content encoding\" , \"deliveryMode\" : \"Delivery mode can be either - non-persistent (1) or persistent (2)\" , \"priority\" : \"Priority refers to the use - 0 to 9\" , \"correlationId\" : \"CorrelationId is the correlation identifier\" , \"replyTo\" : \"ReplyTo is the address to reply to (ex: RPC)\" , \"expiration\" : \"Expiration refers to message expiration spec\" , \"messageId\" : \"MessageId is message identifier\" , \"timestamp\" : \"Timestamp refers to the message timestamp\" , \"type\" : \"Type refers to the message type name\" , \"appId\" : \"AppId refers to the application id\" , \"exchange\" : \"Exchange is basic.publish exchange\" , \"routingKey\" : \"RoutingKey is basic.publish routing key\" , \"body\" : \"Body represents the message body\" } }","title":"Event Structure"},{"location":"eventsources/setup/amqp/#setup","text":"Let's set up RabbitMQ locally. apiVersion : v1 kind : Service metadata : labels : component : rabbitmq name : rabbitmq - service spec : ports : - port : 5672 selector : app : taskQueue component : rabbitmq --- apiVersion : v1 kind : ReplicationController metadata : labels : component : rabbitmq name : rabbitmq - controller spec : replicas : 1 template : metadata : labels : app : taskQueue component : rabbitmq spec : containers : - image : rabbitmq name : rabbitmq ports : - containerPort : 5672 resources : limits : cpu : 100 m Make sure the RabbitMQ controller pod is up and running before proceeding further. Expose the RabbitMQ server to the local publisher using port-forward . kubectl -n argo-events port-forward <rabbitmq-pod-name> 5672:5672 Create the event source by running the following command. 
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/amqp.yaml Inspect the event-source pod logs to make sure it was able to subscribe to the exchange specified in the event source to consume messages. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/amqp.yaml Let's set up a RabbitMQ publisher. If you don't have pika installed, run. python -m pip install pika --upgrade Open a python REPL and run the following code to publish a message on the exchange called test . import pika connection = pika . BlockingConnection ( pika . ConnectionParameters ( 'localhost' )) channel = connection . channel () channel . basic_publish ( exchange = 'test' , routing_key = 'hello' , body = '{\"message\": \"hello\"}' ) As soon as you publish a message, the sensor will trigger an Argo workflow. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/amqp/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/aws-sns/","text":"AWS SNS \u00b6 SNS event-source subscribes to AWS SNS topics, listens for events and helps sensors trigger the workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : \"sns headers\" , \"body\" : \"body refers to the sns notification data\" } } Setup \u00b6 Create a topic called test using aws cli or AWS SNS console. Fetch your access and secret key for your AWS account and base64 encode them. Create a secret called aws-secret as follows. apiVersion : v1 kind : Secret metadata : name : aws - secret type : Opaque data : accesskey : < base64 - access - key > secretkey : < base64 - secret - key > Deploy the secret. kubectl -n argo-events apply -f aws-secret.yaml The event-source for AWS SNS creates a pod and exposes it via service. The name for the service is in the <event-source-name>-eventsource-svc format. You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from AWS. You can find more information on Ingress or Route online. Create the event source by running the following command. Make sure to update the URL in the configuration within the event-source. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/aws-sns.yaml Go to SNS settings on AWS and verify the webhook is registered. You can also check it by inspecting the event-source pod logs. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/aws-sns.yaml Publish a message to the SNS topic, and it will trigger an Argo workflow. Run argo list to find the workflow. 
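If you prefer publishing from code rather than the AWS console or CLI, a minimal boto3 sketch looks like the following. Note that boto3 is not part of this guide's setup, and the topic ARN below is a placeholder; substitute the ARN of the test topic you created above.

import boto3

sns = boto3.client("sns", region_name="us-east-1")
sns.publish(
    TopicArn="arn:aws:sns:us-east-1:123456789012:test",  # placeholder ARN
    Message='{"message": "hello"}',
)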
Troubleshoot \u00b6 Please read the FAQ .","title":"AWS SNS"},{"location":"eventsources/setup/aws-sns/#aws-sns","text":"SNS event-source subscribes to AWS SNS topics, listens for events and helps sensors trigger the workloads.","title":"AWS SNS"},{"location":"eventsources/setup/aws-sns/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : \"sns headers\" , \"body\" : \"body refers to the sns notification data\" } }","title":"Event Structure"},{"location":"eventsources/setup/aws-sns/#setup","text":"Create a topic called test using aws cli or AWS SNS console. Fetch your access and secret key for your AWS account and base64 encode them. Create a secret called aws-secret as follows. apiVersion : v1 kind : Secret metadata : name : aws - secret type : Opaque data : accesskey : < base64 - access - key > secretkey : < base64 - secret - key > Deploy the secret. kubectl -n argo-events apply -f aws-secret.yaml The event-source for AWS SNS creates a pod and exposes it via service. The name for the service is in the <event-source-name>-eventsource-svc format. You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from AWS. You can find more information on Ingress or Route online. Create the event source by running the following command. Make sure to update the URL in the configuration within the event-source. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/aws-sns.yaml Go to SNS settings on AWS and verify the webhook is registered. You can also check it by inspecting the event-source pod logs. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/aws-sns.yaml Publish a message to the SNS topic, and it will trigger an Argo workflow. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/aws-sns/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/aws-sqs/","text":"AWS SQS \u00b6 SQS event-source listens to messages on an AWS SQS queue and helps sensors trigger workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"messageId\" : \"message id\" , // Each message attribute consists of a Name, Type, and Value. For more information, // see Amazon SQS Message Attributes // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) // in the Amazon Simple Queue Service Developer Guide. \"messageAttributes\" : \"message attributes\" , \"body\" : \"Body is the message data\" } } Setup \u00b6 Create a queue called test either using aws cli or AWS SQS management console. 
Fetch your access and secret key for your AWS account and base64 encode them. Create a secret called aws-secret as follows. apiVersion : v1 kind : Secret metadata : name : aws - secret type : Opaque data : accesskey : < base64 - access - key > secretkey : < base64 - secret - key > Deploy the secret. kubectl -n argo-events apply -f aws-secret.yaml Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/aws-sqs.yaml Inspect the event-source pod logs to make sure it was able to subscribe to the queue specified in the event source to consume messages. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/aws-sqs.yaml Dispatch a message on the SQS queue. aws sqs send - message -- queue - url https : // sqs . us - east - 1 . amazonaws . com / XXXXX / test -- message - body '{\"message\": \"hello\"}' Once a message is published, an Argo workflow will be triggered. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"AWS SQS"},{"location":"eventsources/setup/aws-sqs/#aws-sqs","text":"SQS event-source listens to messages on an AWS SQS queue and helps sensors trigger workloads.","title":"AWS SQS"},{"location":"eventsources/setup/aws-sqs/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"messageId\" : \"message id\" , // Each message attribute consists of a Name, Type, and Value. For more information, // see Amazon SQS Message Attributes // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) // in the Amazon Simple Queue Service Developer Guide. \"messageAttributes\" : \"message attributes\" , \"body\" : \"Body is the message data\" } }","title":"Event Structure"},{"location":"eventsources/setup/aws-sqs/#setup","text":"Create a queue called test either using aws cli or AWS SQS management console. Fetch your access and secret key for your AWS account and base64 encode them. Create a secret called aws-secret as follows. apiVersion : v1 kind : Secret metadata : name : aws - secret type : Opaque data : accesskey : < base64 - access - key > secretkey : < base64 - secret - key > Deploy the secret. kubectl -n argo-events apply -f aws-secret.yaml Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/aws-sqs.yaml Inspect the event-source pod logs to make sure it was able to subscribe to the queue specified in the event source to consume messages. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/aws-sqs.yaml Dispatch a message on the SQS queue. aws sqs send - message -- queue - url https : // sqs . us - east - 1 . amazonaws . com / XXXXX / test -- message - body '{\"message\": \"hello\"}' Once a message is published, an Argo workflow will be triggered. 
Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/aws-sqs/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/azure-queue-storage/","text":"Azure Queue Storage \u00b6 Azure Queue Storage event-source allows you to consume messages from Azure Storage queues. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"id\" : \"unique_event_id\" , \"source\" : \"name_of_the_event_source\" , \"specversion\" : \"cloud_events_version\" , \"type\" : \"type_of_event_source\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" , \"time\" : \"event_time\" }, \"data\" : { \"messageID\" : \"MessageID is the ID of the message\" , \"body\" : \"Body represents the message body\" , \"insertionTime\" : \"InsertionTime is the time the message was inserted into the queue\" } } Setup \u00b6 Create a queue called test either using az cli or Azure storage management console. Fetch your connection string for Azure Queue Storage and base64 encode it. Create a secret called azure-secret as follows. apiVersion : v1 kind : Secret metadata : name : azure - secret type : Opaque data : connectionstring : < base64 - connection - string > Deploy the secret. kubectl -n argo-events apply -f azure-secret.yaml Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/azure-queue-storage.yaml Inspect the event-source pod logs to make sure it was able to listen to the queue specified in the event source to consume messages. Create a sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/azure-queue-storage.yaml Dispatch a message to the queue. az storage message put -q test --content '{\"message\": \"hello\"}' --account-name mystorageaccount --connection-string \"<connection-string>\" Once a message is published, an Argo workflow will be triggered. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"Azure Queue Storage"},{"location":"eventsources/setup/azure-queue-storage/#azure-queue-storage","text":"Azure Queue Storage event-source allows you to consume messages from Azure Storage queues.","title":"Azure Queue Storage"},{"location":"eventsources/setup/azure-queue-storage/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"id\" : \"unique_event_id\" , \"source\" : \"name_of_the_event_source\" , \"specversion\" : \"cloud_events_version\" , \"type\" : \"type_of_event_source\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" , \"time\" : \"event_time\" }, \"data\" : { \"messageID\" : \"MessageID is the ID of the message\" , \"body\" : \"Body represents the message body\" , \"insertionTime\" : \"InsertionTime is the time the message was inserted into the queue\" } }","title":"Event Structure"},{"location":"eventsources/setup/azure-queue-storage/#setup","text":"Create a queue called test either using az cli or Azure storage management console. Fetch your connection string for Azure Queue Storage and base64 encode it. Create a secret called azure-secret as follows. 
apiVersion: v1 kind: Secret metadata: name: azure-secret type: Opaque data: connectionstring: <base64-connection-string> Deploy the secret. kubectl -n argo-events apply -f azure-secret.yaml Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/azure-queue-storage.yaml Inspect the event-source pod logs to make sure it was able to listen to the queue specified in the event source to consume messages. Create a sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/azure-queue-storage.yaml Dispatch a message to the queue. az storage message put -q test --content '{\"message\": \"hello\"}' --account-name mystorageaccount --connection-string \"\" Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/azure-queue-storage/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/azure-service-bus/","text":"Azure Service Bus \u00b6 Service Bus event-source allows you to consume messages from queues and topics in Azure Service Bus and helps the sensor trigger workflows. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"id\": \"unique_event_id\", \"source\": \"name_of_the_event_source\", \"specversion\": \"cloud_events_version\", \"type\": \"type_of_event_source\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\", \"time\": \"event_time\", }, \"data\": { \"applicationProperties\": \"ApplicationProperties can be used to store custom metadata for a message\", \"body\": \"Body represents the message body\", \"contentType\": \"ContentType is the MIME content type\", \"correlationID\": \"CorrelationID is the correlation identifier\", \"enqueuedTime\": \"EnqueuedTime is the time when the message was enqueued\", \"messageID\": \"ID of the message\", \"replyTo\": \"ReplyTo is an application-defined value that specifies a reply path to the receiver of the message\", \"sequenceNumber\": \"SequenceNumber is a unique number assigned to a message by Service Bus\", \"subject\": \"Subject enables an application to indicate the purpose of the message, similar to an email subject line\", } } Setup \u00b6 Create a queue called test using either the Azure CLI or the Azure Service Bus management console. Fetch your connection string for Azure Service Bus and base64 encode it. Create a secret called azure-secret as follows. apiVersion: v1 kind: Secret metadata: name: azure-secret type: Opaque data: connectionstring: <base64-connection-string> Deploy the secret. kubectl -n argo-events apply -f azure-secret.yaml Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/azure-service-bus.yaml Inspect the event-source pod logs to make sure it was able to listen to the queue specified in the event source to consume messages. Create a sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/azure-service-bus.yaml Let's set up a Service Bus client.
If you don't have azure-servicebus installed, run: python -m pip install azure-servicebus --upgrade Open a Python REPL and run the following code to send a message on the queue called test . Before running the code, make sure you have the SERVICE_BUS_CONNECTION_STRING environment variable set. This is the connection string for your Azure Service Bus. import os, json from azure.servicebus import ServiceBusClient, ServiceBusMessage servicebus_client = ServiceBusClient.from_connection_string(conn_str=os.environ['SERVICE_BUS_CONNECTION_STRING']) with servicebus_client: sender = servicebus_client.get_queue_sender(queue_name=\"test\") with sender: message = ServiceBusMessage('{\"hello\": \"world\"}') sender.send_messages(message) As soon as you publish a message, the sensor will trigger an Argo workflow. Run argo list to find the workflow.","title":"Azure Service Bus"},{"location":"eventsources/setup/azure-service-bus/#azure-service-bus","text":"Service Bus event-source allows you to consume messages from queues and topics in Azure Service Bus and helps the sensor trigger workflows.","title":"Azure Service Bus"},{"location":"eventsources/setup/azure-service-bus/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"id\": \"unique_event_id\", \"source\": \"name_of_the_event_source\", \"specversion\": \"cloud_events_version\", \"type\": \"type_of_event_source\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\", \"time\": \"event_time\", }, \"data\": { \"applicationProperties\": \"ApplicationProperties can be used to store custom metadata for a message\", \"body\": \"Body represents the message body\", \"contentType\": \"ContentType is the MIME content type\", \"correlationID\": \"CorrelationID is the correlation identifier\", \"enqueuedTime\": \"EnqueuedTime is the time when the message was enqueued\", \"messageID\": \"ID of the message\", \"replyTo\": \"ReplyTo is an application-defined value that specifies a reply path to the receiver of the message\", \"sequenceNumber\": \"SequenceNumber is a unique number assigned to a message by Service Bus\", \"subject\": \"Subject enables an application to indicate the purpose of the message, similar to an email subject line\", } }","title":"Event Structure"},{"location":"eventsources/setup/azure-service-bus/#setup","text":"Create a queue called test using either the Azure CLI or the Azure Service Bus management console. Fetch your connection string for Azure Service Bus and base64 encode it. Create a secret called azure-secret as follows. apiVersion: v1 kind: Secret metadata: name: azure-secret type: Opaque data: connectionstring: <base64-connection-string> Deploy the secret. kubectl -n argo-events apply -f azure-secret.yaml Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/azure-service-bus.yaml Inspect the event-source pod logs to make sure it was able to listen to the queue specified in the event source to consume messages. Create a sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/azure-service-bus.yaml Let's set up a Service Bus client. If you don't have azure-servicebus installed, run:
python -m pip install azure-servicebus --upgrade Open a Python REPL and run the following code to send a message on the queue called test . Before running the code, make sure you have the SERVICE_BUS_CONNECTION_STRING environment variable set. This is the connection string for your Azure Service Bus. import os, json from azure.servicebus import ServiceBusClient, ServiceBusMessage servicebus_client = ServiceBusClient.from_connection_string(conn_str=os.environ['SERVICE_BUS_CONNECTION_STRING']) with servicebus_client: sender = servicebus_client.get_queue_sender(queue_name=\"test\") with sender: message = ServiceBusMessage('{\"hello\": \"world\"}') sender.send_messages(message) As soon as you publish a message, the sensor will trigger an Argo workflow. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/bitbucket/","text":"Bitbucket (Cloud) \u00b6 Bitbucket event-source programmatically configures webhooks for projects on Bitbucket and helps the sensor trigger workloads on events. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { \"body\": \"Body is the Bitbucket event payload\", \"headers\": \"Headers from the Bitbucket event\", } } Specification \u00b6 Bitbucket event-source specification is available here . Example event-source yaml file is here . Setup \u00b6 NOTE: In this setup, we will use the basic auth strategy together with an App password (there is also support for OAuth ). Create an App password if you don't have one. Follow instructions to create a new Bitbucket App password. Grant it the Webhooks - Read and Write permissions, as well as any permissions that apply to the events that the webhook subscribes to (e.g. if you're using the example event-source yaml file, which subscribes to the repo:push event, then you would also need to grant the Repositories - Read permission). Base64 encode your App password and your Bitbucket username. echo -n | base64 echo -n | base64 Create a secret called bitbucket-access that contains your encoded Bitbucket credentials. apiVersion: v1 kind: Secret metadata: name: bitbucket-access type: Opaque data: username: <base64-encoded-username-from-previous-step> password: <base64-encoded-password-from-previous-step> Deploy the secret into the K8s cluster. kubectl -n argo-events apply -f bitbucket-access.yaml The event-source for Bitbucket creates a pod and exposes it via a service. The name for the service is in -eventsource-svc format. You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from Bitbucket. You can find more information on Ingress or Route online. Create the event source by running the following command. You can use the example event-source yaml file but make sure to replace the url field and to modify the owner , repositorySlug and projectKey fields with your own repo. kubectl apply -n argo-events -f Go to Webhooks under your project settings on Bitbucket and verify the webhook is registered. You can also do the same by looking at the event-source pod logs. Create the sensor by running the following command.
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/sensors/bitbucket.yaml Make a change to one of your project files and commit. It will trigger an argo workflow. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"Bitbucket (Cloud)"},{"location":"eventsources/setup/bitbucket/#bitbucket-cloud","text":"Bitbucket event-source programmatically configures webhooks for projects on Bitbucket and helps the sensor trigger workloads on events.","title":"Bitbucket (Cloud)"},{"location":"eventsources/setup/bitbucket/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { \"body\": \"Body is the Bitbucket event payload\", \"headers\": \"Headers from the Bitbucket event\", } }","title":"Event Structure"},{"location":"eventsources/setup/bitbucket/#specification","text":"Bitbucket event-source specification is available here . Example event-source yaml file is here .","title":"Specification"},{"location":"eventsources/setup/bitbucket/#setup","text":"NOTE: In this setup, we will use the basic auth strategy together with an App password (there is also support for OAuth ). Create an App password if you don't have one. Follow instructions to create a new Bitbucket App password. Grant it the Webhooks - Read and Write permissions, as well as any permissions that apply to the events that the webhook subscribes to (e.g. if you're using the example event-source yaml file, which subscribes to the repo:push event, then you would also need to grant the Repositories - Read permission). Base64 encode your App password and your Bitbucket username. echo -n | base64 echo -n | base64 Create a secret called bitbucket-access that contains your encoded Bitbucket credentials. apiVersion: v1 kind: Secret metadata: name: bitbucket-access type: Opaque data: username: <base64-encoded-username-from-previous-step> password: <base64-encoded-password-from-previous-step> Deploy the secret into the K8s cluster. kubectl -n argo-events apply -f bitbucket-access.yaml The event-source for Bitbucket creates a pod and exposes it via a service. The name for the service is in -eventsource-svc format. You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from Bitbucket. You can find more information on Ingress or Route online. Create the event source by running the following command. You can use the example event-source yaml file but make sure to replace the url field and to modify the owner , repositorySlug and projectKey fields with your own repo. kubectl apply -n argo-events -f Go to Webhooks under your project settings on Bitbucket and verify the webhook is registered. You can also do the same by looking at the event-source pod logs. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/sensors/bitbucket.yaml Make a change to one of your project files and commit. It will trigger an argo workflow.
Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/bitbucket/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/bitbucketserver/","text":"Bitbucket Server \u00b6 Bitbucket Server event-source programmatically configures webhooks for projects on Bitbucket Server and helps the sensor trigger workloads on events. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { \"body\": \"Body is the Bitbucket Server event payload\", \"headers\": \"Headers from the Bitbucket Server event\", } } Specification \u00b6 Bitbucket Server event-source specification is available here . Example event-source yaml file is here . Setup \u00b6 Create an API token if you don't have one. Follow instructions to create a new Bitbucket Server API Token. Grant it the Projects: Admin permissions. Base64 encode your API token. echo -n | base64 Create a secret called bitbucketserver-access that contains your encoded Bitbucket Server API token. You can also include a base64-encoded secret key for your webhook, if any. apiVersion: v1 kind: Secret metadata: name: bitbucketserver-access type: Opaque data: token: <base64-encoded-api-token-from-previous-step> secret: <base64-encoded-webhook-secret-key> Deploy the secret into the K8s cluster. kubectl -n argo-events apply -f bitbucketserver-access.yaml The event-source for Bitbucket Server creates a pod and exposes it via a service. The name for the service is in -eventsource-svc format. You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from Bitbucket Server. You can find more information on Ingress or Route online. Create the event source by running the following command. You can use the example event-source yaml file from here but make sure to replace the url field and to modify the repositories list with your own repos. kubectl apply -n argo-events -f Go to Webhooks under your project settings on Bitbucket Server and verify the webhook is registered. You can also do the same by looking at the event-source pod logs. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/sensors/bitbucketserver.yaml Make a change to one of your project files and commit. It will trigger an argo workflow. Run argo list to find the workflow.
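If you configured a webhook secret above, the receiving side can check each delivery against it. A hedged sketch, assuming the signature header carries an HMAC-SHA256 hex digest of the request body in the common "sha256=<hexdigest>" form (the header name and format should be confirmed against your Bitbucket Server version's webhook docs):

```python
# Hedged sketch: verify a webhook payload signature of the form "sha256=<hexdigest>".
import hashlib
import hmac

def verify_signature(secret: bytes, body: bytes, signature_header: str) -> bool:
    algo, _, received = signature_header.partition("=")
    if algo != "sha256":
        return False
    expected = hmac.new(secret, body, hashlib.sha256).hexdigest()
    # compare_digest avoids leaking timing information
    return hmac.compare_digest(expected, received)
```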
Troubleshoot \u00b6 Please read the FAQ .","title":"Bitbucket Server"},{"location":"eventsources/setup/bitbucketserver/#bitbucket-server","text":"Bitbucket Server event-source programmatically configures webhooks for projects on Bitbucket Server and helps the sensor trigger workloads on events.","title":"Bitbucket Server"},{"location":"eventsources/setup/bitbucketserver/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { \"body\": \"Body is the Bitbucket Server event payload\", \"headers\": \"Headers from the Bitbucket Server event\", } }","title":"Event Structure"},{"location":"eventsources/setup/bitbucketserver/#specification","text":"Bitbucket Server event-source specification is available here . Example event-source yaml file is here .","title":"Specification"},{"location":"eventsources/setup/bitbucketserver/#setup","text":"Create an API token if you don't have one. Follow instructions to create a new Bitbucket Server API Token. Grant it the Projects: Admin permissions. Base64 encode your API token. echo -n | base64 Create a secret called bitbucketserver-access that contains your encoded Bitbucket Server API token. You can also include a base64-encoded secret key for your webhook, if any. apiVersion: v1 kind: Secret metadata: name: bitbucketserver-access type: Opaque data: token: <base64-encoded-api-token-from-previous-step> secret: <base64-encoded-webhook-secret-key> Deploy the secret into the K8s cluster. kubectl -n argo-events apply -f bitbucketserver-access.yaml The event-source for Bitbucket Server creates a pod and exposes it via a service. The name for the service is in -eventsource-svc format. You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from Bitbucket Server. You can find more information on Ingress or Route online. Create the event source by running the following command. You can use the example event-source yaml file from here but make sure to replace the url field and to modify the repositories list with your own repos. kubectl apply -n argo-events -f Go to Webhooks under your project settings on Bitbucket Server and verify the webhook is registered. You can also do the same by looking at the event-source pod logs. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/sensors/bitbucketserver.yaml Make a change to one of your project files and commit. It will trigger an argo workflow. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/bitbucketserver/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/calendar/","text":"Calendar \u00b6 Calendar event-source generates events on either a cron schedule or an interval and helps the sensor trigger workloads.
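To preview what a cron schedule will resolve to before putting it in a Calendar event-source spec, a small sketch using the third-party croniter package (an assumption; it is not part of Argo Events) can enumerate the upcoming event times:

```python
# Hedged sketch: print the next few firing times of a candidate cron schedule.
# croniter is a third-party package: python -m pip install croniter
from datetime import datetime, timezone
from croniter import croniter

schedule = croniter("*/10 * * * *", datetime.now(timezone.utc))  # every 10 minutes
for _ in range(3):
    print(schedule.get_next(datetime))  # successive UTC event times
```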
Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { \"eventTime\": { /* UTC time of the event */ }, \"userPayload\": { /* static payload available in the event source */ }, } } Specification \u00b6 Calendar event-source specification is available here . Setup \u00b6 Install the event source in the argo-events namespace. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/calendar.yaml The event-source will generate events every 10 seconds. Let's create the sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/calendar.yaml Once the sensor pod is in running state, wait for the next interval; the sensor will then trigger a workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"Calendar"},{"location":"eventsources/setup/calendar/#calendar","text":"Calendar event-source generates events on either a cron schedule or an interval and helps the sensor trigger workloads.","title":"Calendar"},{"location":"eventsources/setup/calendar/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { \"eventTime\": { /* UTC time of the event */ }, \"userPayload\": { /* static payload available in the event source */ }, } }","title":"Event Structure"},{"location":"eventsources/setup/calendar/#specification","text":"Calendar event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/calendar/#setup","text":"Install the event source in the argo-events namespace. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/calendar.yaml The event-source will generate events every 10 seconds. Let's create the sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/calendar.yaml Once the sensor pod is in running state, wait for the next interval; the sensor will then trigger a workflow.","title":"Setup"},{"location":"eventsources/setup/calendar/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/emitter/","text":"Emitter \u00b6 Emitter event-source subscribes to a channel and helps the sensor trigger workloads.
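Since emitter speaks the MQTT protocol, a message can also be published from Python. A heavily hedged sketch: it assumes emitter's usual topic convention of prefixing the channel with a channel key ("<channel-key>/<channel>/"), assumes the broker's MQTT listener is reachable (emitter's default listener is port 8080; the example Service below only exposes the cluster port, so you may need to port-forward to the pod), and uses the third-party paho-mqtt package:

```python
# Hedged sketch: publish to an emitter channel over MQTT.
# Assumptions: topic format "<channel-key>/<channel>/", broker reachable on
# its MQTT listener, paho-mqtt installed (python -m pip install paho-mqtt).
import paho.mqtt.publish as publish

channel_key = "<channel-key>"  # placeholder: generate one via emitter's keygen
publish.single(
    topic=f"{channel_key}/test/",
    payload='{"message": "hello"}',
    hostname="localhost",  # placeholder: port-forwarded broker address
    port=8080,
)
```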
Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { \"topic\": \"name_of_the_topic\", \"body\": \"message_payload\" } } Specification \u00b6 Emitter event-source specification is available here . Setup \u00b6 Deploy the emitter in your local K8s cluster. --- apiVersion: v1 kind: Service metadata: name: broker labels: app: broker spec: clusterIP: None ports: - port: 4000 targetPort: 4000 selector: app: broker --- apiVersion: apps/v1 kind: Deployment metadata: name: broker spec: replicas: 1 selector: matchLabels: app: broker template: metadata: labels: app: broker spec: containers: - env: - name: EMITTER_LICENSE value: \"zT83oDV0DWY5_JysbSTPTDr8KB0AAAAAAAAAAAAAAAI\" # This is a test license, DO NOT USE IN PRODUCTION! - name: EMITTER_CLUSTER_SEED value: \"broker\" - name: EMITTER_CLUSTER_ADVERTISE value: \"private:4000\" name: broker image: emitter/server:latest ports: - containerPort: 8080 - containerPort: 443 - containerPort: 4000 volumeMounts: - name: broker-volume mountPath: /data volumes: - name: broker-volume hostPath: path: /emitter #directory on host Create the event-source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/emitter.yaml Inspect the event-source pod logs to make sure it was able to subscribe to the topic specified in the event source to consume messages. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/emitter.yaml Send a message on the emitter channel using one of the clients listed at https://emitter.io/develop/golang/ . Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"Emitter"},{"location":"eventsources/setup/emitter/#emitter","text":"Emitter event-source subscribes to a channel and helps the sensor trigger workloads.","title":"Emitter"},{"location":"eventsources/setup/emitter/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { \"topic\": \"name_of_the_topic\", \"body\": \"message_payload\" } }","title":"Event Structure"},{"location":"eventsources/setup/emitter/#specification","text":"Emitter event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/emitter/#setup","text":"Deploy the emitter in your local K8s cluster.
--- apiVersion: v1 kind: Service metadata: name: broker labels: app: broker spec: clusterIP: None ports: - port: 4000 targetPort: 4000 selector: app: broker --- apiVersion: apps/v1 kind: Deployment metadata: name: broker spec: replicas: 1 selector: matchLabels: app: broker template: metadata: labels: app: broker spec: containers: - env: - name: EMITTER_LICENSE value: \"zT83oDV0DWY5_JysbSTPTDr8KB0AAAAAAAAAAAAAAAI\" # This is a test license, DO NOT USE IN PRODUCTION! - name: EMITTER_CLUSTER_SEED value: \"broker\" - name: EMITTER_CLUSTER_ADVERTISE value: \"private:4000\" name: broker image: emitter/server:latest ports: - containerPort: 8080 - containerPort: 443 - containerPort: 4000 volumeMounts: - name: broker-volume mountPath: /data volumes: - name: broker-volume hostPath: path: /emitter #directory on host Create the event-source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/emitter.yaml Inspect the event-source pod logs to make sure it was able to subscribe to the topic specified in the event source to consume messages. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/emitter.yaml Send a message on the emitter channel using one of the clients listed at https://emitter.io/develop/golang/ . Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/emitter/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/file/","text":"File \u00b6 File event-source listens to file system events and helps the sensor trigger workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { \"name\": \"Relative path to the file or directory\", \"op\": \"File operation that triggered the event\" // Create, Write, Remove, Rename, Chmod } } Specification \u00b6 File event-source specification is available here . Setup \u00b6 Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/file.yaml The event source is configured to listen to file system events for the test-data directory and a file called x.txt . Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/file.yaml Log into the event-source pod by running the following command. kubectl -n argo-events exec -it <event-source-pod-name> -c file-events -- /bin/bash Let's create a file called x.txt under the test-data directory in the event-source pod. cd test-data cat <<EOF > x.txt hello EOF Once you create the file x.txt , the sensor will trigger an argo workflow. Run argo list to find the workflow. For real-world use cases, you should use PersistentVolumeClaim.
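The Create/Write/Remove/Rename/Chmod operations listed in the event structure mirror ordinary inotify-style notifications. For experimenting locally, outside the cluster, a sketch using the third-party watchdog package (an assumption; it is not part of Argo Events) surfaces the same kinds of events the source reacts to:

```python
# Hedged sketch: watch a directory locally to see the kinds of file events
# (created/modified/deleted/moved) the File event-source reacts to in the pod.
# watchdog is a third-party package: python -m pip install watchdog
import time
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

class PrintHandler(FileSystemEventHandler):
    def on_any_event(self, event):
        print(event.event_type, event.src_path)

observer = Observer()
observer.schedule(PrintHandler(), path="test-data", recursive=False)
observer.start()
try:
    time.sleep(30)  # create test-data/x.txt in another shell to see events
finally:
    observer.stop()
    observer.join()
```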
Troubleshoot \u00b6 Please read the FAQ .","title":"File"},{"location":"eventsources/setup/file/#file","text":"File event-source listens to file system events and helps the sensor trigger workloads.","title":"File"},{"location":"eventsources/setup/file/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { \"name\": \"Relative path to the file or directory\", \"op\": \"File operation that triggered the event\" // Create, Write, Remove, Rename, Chmod } }","title":"Event Structure"},{"location":"eventsources/setup/file/#specification","text":"File event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/file/#setup","text":"Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/file.yaml The event source is configured to listen to file system events for the test-data directory and a file called x.txt . Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/file.yaml Log into the event-source pod by running the following command. kubectl -n argo-events exec -it <event-source-pod-name> -c file-events -- /bin/bash Let's create a file called x.txt under the test-data directory in the event-source pod. cd test-data cat <<EOF > x.txt hello EOF Once you create the file x.txt , the sensor will trigger an argo workflow. Run argo list to find the workflow. For real-world use cases, you should use PersistentVolumeClaim.","title":"Setup"},{"location":"eventsources/setup/file/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/gcp-pub-sub/","text":"GCP Pub/Sub \u00b6 GCP Pub/Sub event-source subscribes to messages published by a GCP publisher and helps the sensor trigger workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { \"id\": \"message id\", // Attributes represents the key-value pairs the current message // is labelled with. \"attributes\": \"key-values\", \"publishTime\": \"The time at which the message was published\", \"body\": \"body refers to the message data\", } } Specification \u00b6 GCP Pub/Sub event-source specification is available here . Setup \u00b6 Fetch the project credentials JSON file from the GCP console. If you use Workload Identity, you can skip this and the next step. Create a K8s secret called gcp-credentials to store the credentials file. apiVersion: v1 data: key.json: <YOUR_CREDENTIALS_STRING_FROM_JSON_FILE> kind: Secret metadata: name: gcp-credentials namespace: argo-events type: Opaque Create the event source by running the following command.
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/gcp-pubsub.yaml If you use Workload Identity, omit the credentialSecret field. Instead, don't forget to configure an appropriate service account (see example ). Inspect the event-source pod logs to make sure it was able to subscribe to the topic. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/gcp-pubsub.yaml Publish a message from the GCP Pub/Sub console. Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow. Subscription, topic and service account preparation \u00b6 You can use existing subscriptions/topics, or let Argo Events create them. Here's the table of which fields are required in the configuration file and what permissions are needed for the service account. Actions Required configuration fields Necessary permissions for service account Example role Use existing subscription Existing SubscriptionID pubsub.subscriptions.consume for the subscription roles/pubsub.subscriber Use existing subscription and verify topic Existing SubscriptionID and its Topic Above + pubsub.subscriptions.get for the subscription roles/pubsub.subscriber + roles/pubsub.viewer Create subscription for existing topic Existing Topic ( SubscriptionID is optional\u2020) Above + pubsub.subscriptions.create for the project pubsub.topics.attachSubscription for the topic roles/pubsub.subscriber + roles/pubsub.editor Create topic and subscription Non-existing Topic ( SubscriptionID is optional\u2020) Above + pubsub.topic.create for the project roles/pubsub.subscriber + roles/pubsub.editor \u2020 If you omit SubscriptionID , a generated hash value is used. For more details about access control, refer to GCP documents: Access control | Cloud Pub/Sub Documentation | Google Cloud \u29c9 Troubleshoot \u00b6 Please read the FAQ .","title":"GCP Pub/Sub"},{"location":"eventsources/setup/gcp-pub-sub/#gcp-pubsub","text":"GCP Pub/Sub event-source subscribes to messages published by a GCP publisher and helps the sensor trigger workloads.","title":"GCP Pub/Sub"},{"location":"eventsources/setup/gcp-pub-sub/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { \"id\": \"message id\", // Attributes represents the key-value pairs the current message // is labelled with. \"attributes\": \"key-values\", \"publishTime\": \"The time at which the message was published\", \"body\": \"body refers to the message data\", } }","title":"Event Structure"},{"location":"eventsources/setup/gcp-pub-sub/#specification","text":"GCP Pub/Sub event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/gcp-pub-sub/#setup","text":"Fetch the project credentials JSON file from the GCP console. If you use Workload Identity, you can skip this and the next step. Create a K8s secret called gcp-credentials to store the credentials file. apiVersion: v1 data: key.json: <YOUR_CREDENTIALS_STRING_FROM_JSON_FILE> kind: Secret metadata: name: gcp-credentials namespace: argo-events type: Opaque Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/gcp-pubsub.yaml If you use Workload Identity, omit the credentialSecret field. Instead, don't forget to configure an appropriate service account (see example ). Inspect the event-source pod logs to make sure it was able to subscribe to the topic. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/gcp-pubsub.yaml Publish a message from the GCP Pub/Sub console. Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow.","title":"Setup"},
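Instead of publishing from the GCP Pub/Sub console, the test message can also be sent from Python. A minimal sketch, assuming google-cloud-pubsub is installed, credentials are available (e.g. via GOOGLE_APPLICATION_CREDENTIALS), and the project/topic names are placeholders to be replaced with the ones in your event-source spec:

```python
# Hedged sketch: publish a test message to the Pub/Sub topic the event-source
# subscribes to. Install: python -m pip install google-cloud-pubsub
from google.cloud import pubsub_v1

publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path("my-project", "my-topic")  # placeholders
future = publisher.publish(topic_path, b'{"message": "hello"}')
print(future.result())  # message ID once the publish is acknowledged
```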
{"location":"eventsources/setup/gcp-pub-sub/#subscription-topic-and-service-account-preparation","text":"You can use existing subscriptions/topics, or let Argo Events create them. Here's the table of which fields are required in the configuration file and what permissions are needed for the service account. Actions Required configuration fields Necessary permissions for service account Example role Use existing subscription Existing SubscriptionID pubsub.subscriptions.consume for the subscription roles/pubsub.subscriber Use existing subscription and verify topic Existing SubscriptionID and its Topic Above + pubsub.subscriptions.get for the subscription roles/pubsub.subscriber + roles/pubsub.viewer Create subscription for existing topic Existing Topic ( SubscriptionID is optional\u2020) Above + pubsub.subscriptions.create for the project pubsub.topics.attachSubscription for the topic roles/pubsub.subscriber + roles/pubsub.editor Create topic and subscription Non-existing Topic ( SubscriptionID is optional\u2020) Above + pubsub.topic.create for the project roles/pubsub.subscriber + roles/pubsub.editor \u2020 If you omit SubscriptionID , a generated hash value is used. For more details about access control, refer to GCP documents: Access control | Cloud Pub/Sub Documentation | Google Cloud \u29c9","title":"Subscription, topic and service account preparation"},{"location":"eventsources/setup/gcp-pub-sub/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/github/","text":"GitHub \u00b6 GitHub event-source programmatically configures webhooks for projects on GitHub and helps the sensor trigger workloads on events. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { \"body\": \"Body is the Github event data\", \"headers\": \"Headers from the Github event\", } } Specification \u00b6 GitHub event-source specification is available here . Example event-source yaml file is here . Setup \u00b6 Create an API token if you don't have one. Follow instructions to create a new GitHub API Token. Grant it the repo_hook permissions. Base64 encode your API token. echo -n | base64 Create a secret called github-access that contains your encoded GitHub API token.
You can also include a base64-encoded secret key for your webhook, if any. apiVersion: v1 kind: Secret metadata: name: github-access type: Opaque data: token: <base64-encoded-api-token-from-previous-step> secret: <base64-encoded-webhook-secret-key> Deploy the secret into the K8s cluster. kubectl -n argo-events apply -f github-access.yaml The event-source for GitHub creates a pod and exposes it via a service. The name for the service is in -eventsource-svc format. You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from GitHub. You can find more information on Ingress or Route online. Create the event source by running the following command. Make sure to replace the url field. kubectl apply -n argo-events -f Go to Webhooks under your project settings on GitHub and verify the webhook is registered. You can also do the same by looking at the event-source pod logs. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/github.yaml Make a change to one of your project files and commit. It will trigger an argo workflow. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"GitHub"},{"location":"eventsources/setup/github/#github","text":"GitHub event-source programmatically configures webhooks for projects on GitHub and helps the sensor trigger workloads on events.","title":"GitHub"},{"location":"eventsources/setup/github/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { \"body\": \"Body is the Github event data\", \"headers\": \"Headers from the Github event\", } }","title":"Event Structure"},{"location":"eventsources/setup/github/#specification","text":"GitHub event-source specification is available here . Example event-source yaml file is here .","title":"Specification"},{"location":"eventsources/setup/github/#setup","text":"Create an API token if you don't have one. Follow instructions to create a new GitHub API Token. Grant it the repo_hook permissions. Base64 encode your API token. echo -n | base64 Create a secret called github-access that contains your encoded GitHub API token. You can also include a base64-encoded secret key for your webhook, if any. apiVersion: v1 kind: Secret metadata: name: github-access type: Opaque data: token: <base64-encoded-api-token-from-previous-step> secret: <base64-encoded-webhook-secret-key> Deploy the secret into the K8s cluster. kubectl -n argo-events apply -f github-access.yaml The event-source for GitHub creates a pod and exposes it via a service. The name for the service is in -eventsource-svc format. You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from GitHub. You can find more information on Ingress or Route online. Create the event source by running the following command. Make sure to replace the url field.
kubectl apply -n argo-events -f Go to Webhooks under your project settings on GitHub and verify the webhook is registered. You can also do the same by looking at the event-source pod logs. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/github.yaml Make a change to one of your project files and commit. It will trigger an argo workflow. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/github/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/gitlab/","text":"GitLab \u00b6 GitLab event-source programmatically configures webhooks for projects on GitLab and helps the sensor trigger workloads upon events. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { \"body\": \"Body is the GitLab event data\", \"headers\": \"Headers from the GitLab event\", } } Specification \u00b6 GitLab event-source specification is available here . Example event-source yaml file is here . Setup \u00b6 Create an API token if you don't have one. Follow instructions to create a new GitLab API Token. Grant it the api permissions. Base64 encode your API token. echo -n | base64 Create a secret called gitlab-access . apiVersion: v1 kind: Secret metadata: name: gitlab-access type: Opaque data: token: <base64-encoded-api-token-from-previous-step> Deploy the secret into the K8s cluster. kubectl -n argo-events apply -f gitlab-access.yaml The event-source for GitLab creates a pod and exposes it via a service. The name for the service is in -eventsource-svc format. You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from GitLab. You can find more information on Ingress or Route online. Create the event source by running the following command. Make sure to update the url field. kubectl apply -n argo-events -f Go to Webhooks under your project settings on GitLab and verify the webhook is registered. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/gitlab.yaml Make a change to one of your project files and commit. It will trigger an argo workflow. Run argo list to find the workflow.
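Before exposing the event-source via an Ingress or Route, the wiring can be checked by emulating a GitLab webhook delivery against a port-forwarded event-source service. A hedged sketch using the third-party requests package; the local URL, endpoint path, and port are placeholders that must match your event-source spec:

```python
# Hedged sketch: emulate a GitLab push-event webhook delivery for local testing.
# requests is a third-party package: python -m pip install requests
import requests

resp = requests.post(
    "http://localhost:12000/push",  # placeholder: port-forwarded endpoint from your spec
    json={"object_kind": "push", "ref": "refs/heads/main"},  # minimal push payload
    headers={"X-Gitlab-Event": "Push Hook"},
)
print(resp.status_code)
```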
Troubleshoot \u00b6 Please read the FAQ .","title":"GitLab"},{"location":"eventsources/setup/gitlab/#gitlab","text":"GitLab event-source programmatically configures webhooks for projects on GitLab and helps the sensor trigger workloads upon events.","title":"GitLab"},{"location":"eventsources/setup/gitlab/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { \"body\": \"Body is the GitLab event data\", \"headers\": \"Headers from the GitLab event\", } }","title":"Event Structure"},{"location":"eventsources/setup/gitlab/#specification","text":"GitLab event-source specification is available here . Example event-source yaml file is here .","title":"Specification"},{"location":"eventsources/setup/gitlab/#setup","text":"Create an API token if you don't have one. Follow instructions to create a new GitLab API Token. Grant it the api permissions. Base64 encode your API token. echo -n | base64 Create a secret called gitlab-access . apiVersion: v1 kind: Secret metadata: name: gitlab-access type: Opaque data: token: <base64-encoded-api-token-from-previous-step> Deploy the secret into the K8s cluster. kubectl -n argo-events apply -f gitlab-access.yaml The event-source for GitLab creates a pod and exposes it via a service. The name for the service is in -eventsource-svc format. You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from GitLab. You can find more information on Ingress or Route online. Create the event source by running the following command. Make sure to update the url field. kubectl apply -n argo-events -f Go to Webhooks under your project settings on GitLab and verify the webhook is registered. Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/gitlab.yaml Make a change to one of your project files and commit. It will trigger an argo workflow. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/gitlab/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/kafka/","text":"Kafka \u00b6 Kafka event-source listens to messages on topics and helps the sensor trigger workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { \"topic\": \"kafka_topic\", \"partition\": \"partition_number\", \"body\": \"message_body\", \"timestamp\": \"timestamp_of_the_message\" } } Specification \u00b6 Kafka event-source specification is available here . Setup \u00b6 Make sure to set up the Kafka cluster in Kubernetes if you don't already have one. You can refer to https://github.com/Yolean/kubernetes-kafka for installation instructions.
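Once the event source and sensor are in place (the remaining steps follow below), a test message can also be produced from Python rather than the Kafka console producer. A minimal sketch using the third-party kafka-python package; the broker address and topic name are placeholders that must match your event-source spec:

```python
# Hedged sketch: produce a test message to the topic the Kafka event-source watches.
# kafka-python is a third-party package: python -m pip install kafka-python
from kafka import KafkaProducer

producer = KafkaProducer(bootstrap_servers="kafka:9092")  # placeholder broker address
producer.send("topic", b'{"message": "hello"}')           # placeholder topic name
producer.flush()  # block until the message is actually sent
```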
Create the event source by running the following command. Make sure to update the appropriate fields. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/kafka.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/kafka.yaml Send a message using a Kafka client. More info on how to send a message is at https://kafka.apache.org/quickstart . Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"Kafka"},{"location":"eventsources/setup/kafka/#kafka","text":"Kafka event-source listens to messages on topics and helps the sensor trigger workloads.","title":"Kafka"},{"location":"eventsources/setup/kafka/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { \"topic\": \"kafka_topic\", \"partition\": \"partition_number\", \"body\": \"message_body\", \"timestamp\": \"timestamp_of_the_message\" } }","title":"Event Structure"},{"location":"eventsources/setup/kafka/#specification","text":"Kafka event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/kafka/#setup","text":"Make sure to set up the Kafka cluster in Kubernetes if you don't already have one. You can refer to https://github.com/Yolean/kubernetes-kafka for installation instructions. Create the event source by running the following command. Make sure to update the appropriate fields. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/kafka.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/kafka.yaml Send a message using a Kafka client. More info on how to send a message is at https://kafka.apache.org/quickstart . Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/kafka/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/minio/","text":"Minio \u00b6 Minio event-source listens to minio bucket notifications and helps the sensor trigger workloads. Note : Minio event-source is exclusive to the Minio server. If you want to trigger workloads on AWS S3 bucket notifications, please set up the AWS SNS event-source. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { notification: [ { /* Minio notification.
More info is available at https://docs.min.io/docs/minio-bucket-notification-guide.html */ } ] } } Setup \u00b6 Make sure to have the minio server deployed and reachable from the event-source. If you are running Minio locally, make sure to port-forward to the minio pod in order to make the service available outside the local K8s cluster. kubectl -n argo-events port-forward 9000:9000 Configure the minio client mc . mc config host add minio http://localhost:9000 minio minio123 Create a K8s secret that holds the access and secret key. This secret will be referenced in the minio event source definition that we are going to install in a later step. apiVersion: v1 data: # base64 of minio accesskey: bWluaW8= # base64 of minio123 secretkey: bWluaW8xMjM= kind: Secret metadata: name: artifacts-minio namespace: argo-events The event source we are going to use configures notifications for a bucket called input . mc mb minio/input Let's install the event source in the argo-events namespace. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/minio.yaml Let's create the sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/minio.yaml Create a file named hello-world.txt and upload it to the input bucket. This will trigger the argo workflow. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"Minio"},{"location":"eventsources/setup/minio/#minio","text":"Minio event-source listens to minio bucket notifications and helps the sensor trigger workloads. Note : Minio event-source is exclusive to the Minio server. If you want to trigger workloads on AWS S3 bucket notifications, please set up the AWS SNS event-source.","title":"Minio"},{"location":"eventsources/setup/minio/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\": { \"type\": \"type_of_event_source\", \"specversion\": \"cloud_events_version\", \"source\": \"name_of_the_event_source\", \"id\": \"unique_event_id\", \"time\": \"event_time\", \"datacontenttype\": \"type_of_data\", \"subject\": \"name_of_the_configuration_within_event_source\" }, \"data\": { notification: [ { /* Minio notification. More info is available at https://docs.min.io/docs/minio-bucket-notification-guide.html */ } ] } }","title":"Event Structure"},{"location":"eventsources/setup/minio/#setup","text":"Make sure to have the minio server deployed and reachable from the event-source. If you are running Minio locally, make sure to port-forward to the minio pod in order to make the service available outside the local K8s cluster. kubectl -n argo-events port-forward 9000:9000 Configure the minio client mc . mc config host add minio http://localhost:9000 minio minio123 Create a K8s secret that holds the access and secret key. This secret will be referenced in the minio event source definition that we are going to install in a later step. apiVersion: v1 data: # base64 of minio accesskey: bWluaW8= # base64 of minio123 secretkey: bWluaW8xMjM= kind: Secret metadata: name: artifacts-minio namespace: argo-events The event source we are going to use configures notifications for a bucket called input . mc mb minio/input Let's install the event source in the argo-events namespace.
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/minio.yaml Let's create the sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/minio.yaml Create a file named hello-world.txt and upload it to the input bucket. This will trigger the Argo workflow. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/minio/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/mqtt/","text":"MQTT \u00b6 The event-source listens to messages over MQTT and helps the sensor trigger workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"topic\" : \"Topic refers to the MQTT topic name\" , \"messageId\" : \"MessageId is the unique ID for the message\" , \"body\" : \"Body is the message payload\" } } Specification \u00b6 MQTT event-source specification is available here . Setup \u00b6 Make sure to set up the MQTT Broker and Bridge in Kubernetes if you don't already have one. Create the event source by running the following command. Make sure to update the appropriate fields. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/mqtt.yaml Create the sensor by running the following command. 
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/mqtt-sensor.yaml Send message by using MQTT client. Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/mqtt/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/nats/","text":"NATS \u00b6 NATS event-source listens to NATS subject notifications and helps sensor trigger the workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"subject\" : \"name_of_the_nats_subject\" , \"headers\" : \"headers_of_the_nats_message\" , \"body\" : \"message_payload\" } } Specification \u00b6 NATS event-source specification is available here . Setup \u00b6 Make sure to have NATS cluster deployed in the Kubernetes. If you don't have one already installed, please refer https://github.com/nats-io/nats-operator for details. NATS cluster setup for test purposes, apiVersion : v1 kind : Service metadata : name : nats namespace : argo - events labels : component : nats spec : selector : component : nats type : ClusterIP ports : - name : client port : 4222 - name : cluster port : 6222 - name : monitor port : 8222 --- apiVersion : apps / v1beta1 kind : StatefulSet metadata : name : nats namespace : argo - events labels : component : nats spec : serviceName : nats replicas : 1 template : metadata : labels : component : nats spec : containers : - name : nats image : nats : latest ports : - containerPort : 4222 name : client - containerPort : 6222 name : cluster - containerPort : 8222 name : monitor livenessProbe : httpGet : path : / port : 8222 initialDelaySeconds : 10 timeoutSeconds : 5 Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/nats.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/nats.yaml If you are running NATS on local K8s cluster, make sure to port-forward to pod, kubectl -n argo-events port-forward 4222:4222 Publish a message for the subject specified in the event source. Refer the nats example to publish a message to the subject https://github.com/nats-io/go-nats-examples/tree/master/patterns/publish-subscribe . go run main . go - s localhost foo '{\"message\": \"hello\"}' Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow. 
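Alternatively, with the port-forward above still active, the natscli tool can publish the same message; a sketch, assuming the subject foo from the example event source:

```sh
# publish a JSON payload to subject "foo" on the forwarded NATS port
nats pub foo '{"message": "hello"}' --server nats://localhost:4222
```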
Troubleshoot \u00b6 Please read the FAQ .","title":"NATS"},{"location":"eventsources/setup/nats/#nats","text":"NATS event-source listens to NATS subject notifications and helps sensor trigger the workloads.","title":"NATS"},{"location":"eventsources/setup/nats/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"subject\" : \"name_of_the_nats_subject\" , \"headers\" : \"headers_of_the_nats_message\" , \"body\" : \"message_payload\" } }","title":"Event Structure"},{"location":"eventsources/setup/nats/#specification","text":"NATS event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/nats/#setup","text":"Make sure to have NATS cluster deployed in the Kubernetes. If you don't have one already installed, please refer https://github.com/nats-io/nats-operator for details. NATS cluster setup for test purposes, apiVersion : v1 kind : Service metadata : name : nats namespace : argo - events labels : component : nats spec : selector : component : nats type : ClusterIP ports : - name : client port : 4222 - name : cluster port : 6222 - name : monitor port : 8222 --- apiVersion : apps / v1beta1 kind : StatefulSet metadata : name : nats namespace : argo - events labels : component : nats spec : serviceName : nats replicas : 1 template : metadata : labels : component : nats spec : containers : - name : nats image : nats : latest ports : - containerPort : 4222 name : client - containerPort : 6222 name : cluster - containerPort : 8222 name : monitor livenessProbe : httpGet : path : / port : 8222 initialDelaySeconds : 10 timeoutSeconds : 5 Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/nats.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/nats.yaml If you are running NATS on local K8s cluster, make sure to port-forward to pod, kubectl -n argo-events port-forward 4222:4222 Publish a message for the subject specified in the event source. Refer the nats example to publish a message to the subject https://github.com/nats-io/go-nats-examples/tree/master/patterns/publish-subscribe . go run main . go - s localhost foo '{\"message\": \"hello\"}' Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/nats/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/nsq/","text":"NSQ \u00b6 NSQ event-source subscribes to nsq pub/sub notifications and helps sensor trigger the workloads. 
Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : \"Body is the message data\" , \"timestamp\" : \"timestamp of the message\" , \"nsqdAddress\" : \"NSQDAddress is the address of the nsq host\" } } Specification \u00b6 NSQ event-source is available here . Setup \u00b6 Deploy NSQ on local K8s cluster. apiVersion : v1 kind : Service metadata : name : nsqlookupd labels : app : nsq spec : ports : - port : 4160 targetPort : 4160 name : tcp - port : 4161 targetPort : 4161 name : http clusterIP : None selector : app : nsq component : nsqlookupd --- apiVersion : v1 kind : Service metadata : name : nsqd labels : app : nsq spec : ports : - port : 4150 targetPort : 4150 name : tcp - port : 4151 targetPort : 4151 name : http clusterIP : None selector : app : nsq component : nsqd --- apiVersion : v1 kind : Service metadata : name : nsqadmin labels : app : nsq spec : ports : - port : 4170 targetPort : 4170 name : tcp - port : 4171 targetPort : 4171 name : http selector : app : nsq component : nsqadmin --- apiVersion : apps / v1beta1 kind : StatefulSet metadata : name : nsqlookupd spec : serviceName : \"nsqlookupd\" replicas : 1 updateStrategy : type : RollingUpdate template : metadata : labels : app : nsq component : nsqlookupd spec : containers : - name : nsqlookupd image : nsqio / nsq : v1 . 1.0 imagePullPolicy : Always resources : requests : cpu : 30 m memory : 64 Mi ports : - containerPort : 4160 name : tcp - containerPort : 4161 name : http livenessProbe : httpGet : path : / ping port : http initialDelaySeconds : 5 readinessProbe : httpGet : path : / ping port : http initialDelaySeconds : 2 command : - / nsqlookupd terminationGracePeriodSeconds : 5 --- apiVersion : apps / v1beta1 kind : Deployment metadata : name : nsqd spec : replicas : 1 selector : matchLabels : app : nsq component : nsqd template : metadata : labels : app : nsq component : nsqd spec : containers : - name : nsqd image : nsqio / nsq : v1 . 1.0 imagePullPolicy : Always resources : requests : cpu : 30 m memory : 64 Mi ports : - containerPort : 4150 name : tcp - containerPort : 4151 name : http livenessProbe : httpGet : path : / ping port : http initialDelaySeconds : 5 readinessProbe : httpGet : path : / ping port : http initialDelaySeconds : 2 volumeMounts : - name : datadir mountPath : / data command : - / nsqd - - data - path - / data - - lookupd - tcp - address - nsqlookupd . argo - events . svc : 4160 - - broadcast - address - nsqd . argo - events . svc env : - name : HOSTNAME valueFrom : fieldRef : fieldPath : metadata . name terminationGracePeriodSeconds : 5 volumes : - name : datadir emptyDir : {} --- apiVersion : extensions / v1beta1 kind : Deployment metadata : name : nsqadmin spec : replicas : 1 template : metadata : labels : app : nsq component : nsqadmin spec : containers : - name : nsqadmin image : nsqio / nsq : v1 . 
1.0 imagePullPolicy : Always resources : requests : cpu : 30 m memory : 64 Mi ports : - containerPort : 4170 name : tcp - containerPort : 4171 name : http livenessProbe : httpGet : path : / ping port : http initialDelaySeconds : 10 readinessProbe : httpGet : path : / ping port : http initialDelaySeconds : 5 command : - / nsqadmin - - lookupd - http - address - nsqlookupd . argo - events . svc : 4161 terminationGracePeriodSeconds : 5 Expose NSQD by kubectl port-forward . kubectl -n argo-events port-forward service/nsqd 4151:4151 Create topic hello and channel my-channel . curl -X POST 'http://localhost:4151/topic/create?topic=hello' curl -X POST 'http://localhost:4151/channel/create?topic=hello&channel=my-channel' Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/nsq.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/nsq.yaml Publish a message on topic hello and channel my-channel . curl -d '{\"message\": \"hello\"}' 'http://localhost:4151/pub?topic=hello&channel=my-channel' Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"NSQ"},{"location":"eventsources/setup/nsq/#nsq","text":"NSQ event-source subscribes to nsq pub/sub notifications and helps sensor trigger the workloads.","title":"NSQ"},{"location":"eventsources/setup/nsq/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : \"Body is the message data\" , \"timestamp\" : \"timestamp of the message\" , \"nsqdAddress\" : \"NSQDAddress is the address of the nsq host\" } }","title":"Event Structure"},{"location":"eventsources/setup/nsq/#specification","text":"NSQ event-source is available here .","title":"Specification"},{"location":"eventsources/setup/nsq/#setup","text":"Deploy NSQ on local K8s cluster. apiVersion : v1 kind : Service metadata : name : nsqlookupd labels : app : nsq spec : ports : - port : 4160 targetPort : 4160 name : tcp - port : 4161 targetPort : 4161 name : http clusterIP : None selector : app : nsq component : nsqlookupd --- apiVersion : v1 kind : Service metadata : name : nsqd labels : app : nsq spec : ports : - port : 4150 targetPort : 4150 name : tcp - port : 4151 targetPort : 4151 name : http clusterIP : None selector : app : nsq component : nsqd --- apiVersion : v1 kind : Service metadata : name : nsqadmin labels : app : nsq spec : ports : - port : 4170 targetPort : 4170 name : tcp - port : 4171 targetPort : 4171 name : http selector : app : nsq component : nsqadmin --- apiVersion : apps / v1beta1 kind : StatefulSet metadata : name : nsqlookupd spec : serviceName : \"nsqlookupd\" replicas : 1 updateStrategy : type : RollingUpdate template : metadata : labels : app : nsq component : nsqlookupd spec : containers : - name : nsqlookupd image : nsqio / nsq : v1 . 
1.0 imagePullPolicy : Always resources : requests : cpu : 30 m memory : 64 Mi ports : - containerPort : 4160 name : tcp - containerPort : 4161 name : http livenessProbe : httpGet : path : / ping port : http initialDelaySeconds : 5 readinessProbe : httpGet : path : / ping port : http initialDelaySeconds : 2 command : - / nsqlookupd terminationGracePeriodSeconds : 5 --- apiVersion : apps / v1beta1 kind : Deployment metadata : name : nsqd spec : replicas : 1 selector : matchLabels : app : nsq component : nsqd template : metadata : labels : app : nsq component : nsqd spec : containers : - name : nsqd image : nsqio / nsq : v1 . 1.0 imagePullPolicy : Always resources : requests : cpu : 30 m memory : 64 Mi ports : - containerPort : 4150 name : tcp - containerPort : 4151 name : http livenessProbe : httpGet : path : / ping port : http initialDelaySeconds : 5 readinessProbe : httpGet : path : / ping port : http initialDelaySeconds : 2 volumeMounts : - name : datadir mountPath : / data command : - / nsqd - - data - path - / data - - lookupd - tcp - address - nsqlookupd . argo - events . svc : 4160 - - broadcast - address - nsqd . argo - events . svc env : - name : HOSTNAME valueFrom : fieldRef : fieldPath : metadata . name terminationGracePeriodSeconds : 5 volumes : - name : datadir emptyDir : {} --- apiVersion : extensions / v1beta1 kind : Deployment metadata : name : nsqadmin spec : replicas : 1 template : metadata : labels : app : nsq component : nsqadmin spec : containers : - name : nsqadmin image : nsqio / nsq : v1 . 1.0 imagePullPolicy : Always resources : requests : cpu : 30 m memory : 64 Mi ports : - containerPort : 4170 name : tcp - containerPort : 4171 name : http livenessProbe : httpGet : path : / ping port : http initialDelaySeconds : 10 readinessProbe : httpGet : path : / ping port : http initialDelaySeconds : 5 command : - / nsqadmin - - lookupd - http - address - nsqlookupd . argo - events . svc : 4161 terminationGracePeriodSeconds : 5 Expose NSQD by kubectl port-forward . kubectl -n argo-events port-forward service/nsqd 4151:4151 Create topic hello and channel my-channel . curl -X POST 'http://localhost:4151/topic/create?topic=hello' curl -X POST 'http://localhost:4151/channel/create?topic=hello&channel=my-channel' Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/nsq.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/nsq.yaml Publish a message on topic hello and channel my-channel . curl -d '{\"message\": \"hello\"}' 'http://localhost:4151/pub?topic=hello&channel=my-channel' Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/nsq/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/pulsar/","text":"Pulsar \u00b6 Pulsar event-source subscribes to the topics, listens events and helps sensor trigger the workflows. 
Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : \"body is the message data\" , \"publishTime\" : \"timestamp of the message\" , \"key\" : \"message key\" } } Specification \u00b6 Pulsar event-source is available here . Setup \u00b6 To test locally, deploy a standalone Pulsar. apiVersion : apps / v1 kind : Deployment metadata : name : pulsar labels : app : pulsar spec : replicas : 1 template : metadata : name : pulsar labels : app : pulsar spec : containers : - name : pulsar image : apachepulsar / pulsar : 2.4 . 1 command : - bin / pulsar - standalone imagePullPolicy : IfNotPresent volumeMounts : - mountPath : /pulsar/ data name : datadir restartPolicy : Always volumes : - name : datadir emptyDir : {} selector : matchLabels : app : pulsar --- apiVersion : v1 kind : Service metadata : name : pulsar spec : selector : app : pulsar ports : - port : 8080 targetPort : 8080 name : http - port : 6650 name : another targetPort : 6650 type : LoadBalancer Port forward to the pulsar pod using kubectl for port 6650. For production deployment, follow the official Pulsar documentation online. Deploy the eventsource. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/pulsar.yaml Deploy the sensor. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/pulsar.yaml Publish a message on topic test . Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"Pulsar"},{"location":"eventsources/setup/pulsar/#pulsar","text":"Pulsar event-source subscribes to the topics, listens events and helps sensor trigger the workflows.","title":"Pulsar"},{"location":"eventsources/setup/pulsar/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : \"body is the message data\" , \"publishTime\" : \"timestamp of the message\" , \"key\" : \"message key\" } }","title":"Event Structure"},{"location":"eventsources/setup/pulsar/#specification","text":"Pulsar event-source is available here .","title":"Specification"},{"location":"eventsources/setup/pulsar/#setup","text":"To test locally, deploy a standalone Pulsar. apiVersion : apps / v1 kind : Deployment metadata : name : pulsar labels : app : pulsar spec : replicas : 1 template : metadata : name : pulsar labels : app : pulsar spec : containers : - name : pulsar image : apachepulsar / pulsar : 2.4 . 
1 command : - bin / pulsar - standalone imagePullPolicy : IfNotPresent volumeMounts : - mountPath : /pulsar/ data name : datadir restartPolicy : Always volumes : - name : datadir emptyDir : {} selector : matchLabels : app : pulsar --- apiVersion : v1 kind : Service metadata : name : pulsar spec : selector : app : pulsar ports : - port : 8080 targetPort : 8080 name : http - port : 6650 name : another targetPort : 6650 type : LoadBalancer Port forward to the pulsar pod using kubectl for port 6650. For production deployment, follow the official Pulsar documentation online. Deploy the eventsource. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/pulsar.yaml Deploy the sensor. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/pulsar.yaml Publish a message on topic test . Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/pulsar/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/redis-streams/","text":"Redis Streams \u00b6 Redis stream event-source listens to messages on Redis streams and helps sensor trigger workloads. Messages from the stream are read using the Redis consumer group. The main reason for using consumer group is to resume from the last read upon pod restarts. A common consumer group (defaults to \"argo-events-cg\") is created (if not already exists) on all specified streams. When using consumer group, each read through a consumer group is a write operation, because Redis needs to update the last retrieved message id and the pending entries list(PEL) of that specific user in the consumer group. So it can only work with the master Redis instance and not replicas ( https://redis.io/topics/streams-intro ). Redis stream event source expects all the streams to be present on the Redis server. This event source only starts pulling messages from the streams when all of the specified streams exist on the Redis server. On the initial setup, the consumer group is created on all the specified streams to start reading from the latest message (not necessarily the beginning of the stream). On subsequent setups (the consumer group already exists on the streams) or during pod restarts, messages are pulled from the last unacknowledged message in the stream. The consumer group is never deleted automatically. If you want a completely fresh setup again, you must delete the consumer group from the streams. 
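If you want a completely fresh start, the group has to be removed by hand; a redis-cli sketch, assuming a stream named FOO and the default group name:

```sh
# drop the consumer group; the next event-source start re-creates it at the latest message
redis-cli XGROUP DESTROY FOO argo-events-cg
```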
Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"id\" : \"unique_event_id\" , \"source\" : \"name_of_the_event_source\" , \"specversion\" : \"cloud_events_version\" , \"type\" : \"type_of_event_source\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" , \"time\" : \"event_time\" }, \"data\" : { \"stream\" : \"Name of the Redis stream\" , \"message_id\" : \"Message Id\" , \"values\" : \"message body\" } } Example: { \"context\" : { \"id\" : \"64313638396337352d623565612d343639302d383262362d306630333562333437363637\" , \"source\" : \"redis-stream\" , \"specversion\" : \"1.0\" , \"type\" : \"redisStream\" , \"datacontenttype\" : \"application/json\" , \"subject\" : \"example\" , \"time\" : \"2022-03-17T04:47:42Z\" }, \"data\" : { \"stream\" : \"FOO\" , \"message_id\" : \"1647495121754-0\" , \"values\" : { \"key-1\" : \"val-1\" , \"key-2\" : \"val-2\" } } } Specification \u00b6 Redis stream event-source specification is available here . Setup \u00b6 Follow the documentation to set up Redis database. Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/redis-streams.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/redis-streams.yaml Log into redis pod using kubectl . kubectl - n argo - events exec - it < redis - pod - name > - c < redis - container - name > -- / bin / bash Run redis-cli and publish a message on the stream FOO . XADD FOO * message hello Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow. Troubleshoot \u00b6 Redis stream event source expects all the streams to be present on redis server. It only starts pulling messages from the streams when all of the specified streams exist on the redis server. Please read the FAQ .","title":"Redis Streams"},{"location":"eventsources/setup/redis-streams/#redis-streams","text":"Redis stream event-source listens to messages on Redis streams and helps sensor trigger workloads. Messages from the stream are read using the Redis consumer group. The main reason for using consumer group is to resume from the last read upon pod restarts. A common consumer group (defaults to \"argo-events-cg\") is created (if not already exists) on all specified streams. When using consumer group, each read through a consumer group is a write operation, because Redis needs to update the last retrieved message id and the pending entries list(PEL) of that specific user in the consumer group. So it can only work with the master Redis instance and not replicas ( https://redis.io/topics/streams-intro ). Redis stream event source expects all the streams to be present on the Redis server. This event source only starts pulling messages from the streams when all of the specified streams exist on the Redis server. On the initial setup, the consumer group is created on all the specified streams to start reading from the latest message (not necessarily the beginning of the stream). On subsequent setups (the consumer group already exists on the streams) or during pod restarts, messages are pulled from the last unacknowledged message in the stream. The consumer group is never deleted automatically. 
If you want a completely fresh setup again, you must delete the consumer group from the streams.","title":"Redis Streams"},{"location":"eventsources/setup/redis-streams/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"id\" : \"unique_event_id\" , \"source\" : \"name_of_the_event_source\" , \"specversion\" : \"cloud_events_version\" , \"type\" : \"type_of_event_source\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" , \"time\" : \"event_time\" }, \"data\" : { \"stream\" : \"Name of the Redis stream\" , \"message_id\" : \"Message Id\" , \"values\" : \"message body\" } } Example: { \"context\" : { \"id\" : \"64313638396337352d623565612d343639302d383262362d306630333562333437363637\" , \"source\" : \"redis-stream\" , \"specversion\" : \"1.0\" , \"type\" : \"redisStream\" , \"datacontenttype\" : \"application/json\" , \"subject\" : \"example\" , \"time\" : \"2022-03-17T04:47:42Z\" }, \"data\" : { \"stream\" : \"FOO\" , \"message_id\" : \"1647495121754-0\" , \"values\" : { \"key-1\" : \"val-1\" , \"key-2\" : \"val-2\" } } }","title":"Event Structure"},{"location":"eventsources/setup/redis-streams/#specification","text":"Redis stream event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/redis-streams/#setup","text":"Follow the documentation to set up Redis database. Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/redis-streams.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/redis-streams.yaml Log into redis pod using kubectl . kubectl - n argo - events exec - it < redis - pod - name > - c < redis - container - name > -- / bin / bash Run redis-cli and publish a message on the stream FOO . XADD FOO * message hello Once a message is published, an argo workflow will be triggered. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/redis-streams/#troubleshoot","text":"Redis stream event source expects all the streams to be present on redis server. It only starts pulling messages from the streams when all of the specified streams exist on the redis server. Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/redis/","text":"Redis \u00b6 Redis event-source subscribes to Redis publisher and helps sensor trigger workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"channel\" : \"Subscription channel\" , \"pattern\" : \"Message pattern\" , \"body\" : \"message body\" // string } } Specification \u00b6 Redis event-source specification is available here . Setup \u00b6 Follow the documentation to set up Redis database. Create the event source by running the following command. 
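Before applying it, it helps to know what the referenced manifest boils down to; a trimmed sketch (the host address and channel values are illustrative):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: redis
spec:
  redis:
    example:
      hostAddress: redis.argo-events.svc:6379   # illustrative host:port
      channels:
        - FOO                                   # channel the PUBLISH below targets
```

Apply the full example: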
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/redis.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/redis.yaml Log into the redis pod using kubectl . kubectl -n argo-events exec -it <redis-pod-name> -c <redis-container-name> -- /bin/bash Run redis-cli and publish a message on the FOO channel. PUBLISH FOO hello Once a message is published, an Argo workflow will be triggered. Run argo list to find the workflow. Troubleshoot \u00b6 Please read the FAQ .","title":"Redis"},{"location":"eventsources/setup/redis/#redis","text":"Redis event-source subscribes to a Redis publisher and helps the sensor trigger workloads.","title":"Redis"},{"location":"eventsources/setup/redis/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"channel\" : \"Subscription channel\" , \"pattern\" : \"Message pattern\" , \"body\" : \"message body\" // string } }","title":"Event Structure"},{"location":"eventsources/setup/redis/#specification","text":"Redis event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/redis/#setup","text":"Follow the documentation to set up a Redis database. Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/redis.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/redis.yaml Log into the redis pod using kubectl . kubectl -n argo-events exec -it <redis-pod-name> -c <redis-container-name> -- /bin/bash Run redis-cli and publish a message on the FOO channel. PUBLISH FOO hello Once a message is published, an Argo workflow will be triggered. Run argo list to find the workflow.","title":"Setup"},{"location":"eventsources/setup/redis/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/resource/","text":"Resource \u00b6 Resource event-source watches change notifications for K8s objects and helps the sensor trigger workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like the following: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"type\" : \"type_of_the_event\" , // ADD, UPDATE or DELETE \"body\" : \"resource_body\" , // JSON format \"group\" : \"resource_group_name\" , \"version\" : \"resource_version_name\" , \"resource\" : \"resource_name\" } } Specification \u00b6 Resource event-source specification is available here . Setup \u00b6 Create the event source by running the following command. 
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/resource.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/resource.yaml The event source we created in step 1 contains configuration which makes the event-source listen to Argo workflows marked with label app: my-workflow . Lets create a workflow called my-workflow with label app: my-workflow . apiVersion : argoproj . io / v1alpha1 kind : Workflow metadata : name : my - workflow labels : app : my - workflow spec : entrypoint : whalesay templates : - name : whalesay container : image : docker / whalesay : latest command : [ cowsay ] args : [ \"hello world\" ] Once the my-workflow is created, the sensor will trigger the workflow. Run argo list to list the triggered workflow. List Options \u00b6 The Resource Event-Source allows to configure the list options through labels and field selectors for setting up a watch on objects. In the example above, we had set up the list option as follows, filter : # labels and filters are meant to provide K8s API options to filter the object list that are being watched . # Please read https : // kubernetes . io / docs / concepts / overview / working - with - objects / labels / # api for more details . # labels provide listing options to K8s API to watch objects labels : - key : app # Supported operations like == , != , etc . # Defaults to == . # Refer https : // kubernetes . io / docs / concepts / overview / working - with - objects / labels / # label - selectors for more info . # optional . operation : \"==\" value : my - workflow The key-operation-value items under the filter -> labels are used by the event-source to filter the objects that are eligible for the watch. So, in the present case, the event-source will set up a watch for those objects who have label \"app: my-workflow\". You can add more key-operation-value items to the list as per your use-case. Similarly, you can pass field selectors to the watch list options, e.g., filter : # labels and filters are meant to provide K8s API options to filter the object list that are being watched . # Please read https : // kubernetes . io / docs / concepts / overview / working - with - objects / labels / # api for more details . # fields provide listing options to K8s API to watch objects fields : - key : metadata . name # Supported operations like == , != , <= , >= etc . # Defaults to == . # Refer https : // kubernetes . io / docs / concepts / overview / working - with - objects / field - selectors / for more info . # optional . operation : == value : my - workflow Note: The label and fields under filter are used at the time of setting up the watch by the event-source. If you want to filter the objects based on the annotations or some other fields, use the Data Filters available in the sensor. 
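Putting the pieces together, a label-filtered watch over Argo Workflows in the spirit of this example looks roughly like the following (abridged; values are illustrative):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: resource
spec:
  resource:
    example:
      namespace: argo-events     # namespace to watch
      group: argoproj.io         # group/version/resource of the watched object
      version: v1alpha1
      resource: workflows
      eventTypes:
        - ADD                    # also UPDATE and DELETE
      filter:
        labels:
          - key: app
            operation: "=="
            value: my-workflow
```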
Troubleshoot \u00b6 Please read the FAQ .","title":"Resource"},{"location":"eventsources/setup/resource/#resource","text":"Resource event-source watches change notifications for K8s object and helps sensor trigger the workloads.","title":"Resource"},{"location":"eventsources/setup/resource/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"type\" : \"type_of_the_event\" , // ADD, UPDATE or DELETE \"body\" : \"resource_body\" , // JSON format \"group\" : \"resource_group_name\" , \"version\" : \"resource_version_name\" , \"resource\" : \"resource_name\" } }","title":"Event Structure"},{"location":"eventsources/setup/resource/#specification","text":"Resource event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/resource/#setup","text":"Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/resource.yaml Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/resource.yaml The event source we created in step 1 contains configuration which makes the event-source listen to Argo workflows marked with label app: my-workflow . Lets create a workflow called my-workflow with label app: my-workflow . apiVersion : argoproj . io / v1alpha1 kind : Workflow metadata : name : my - workflow labels : app : my - workflow spec : entrypoint : whalesay templates : - name : whalesay container : image : docker / whalesay : latest command : [ cowsay ] args : [ \"hello world\" ] Once the my-workflow is created, the sensor will trigger the workflow. Run argo list to list the triggered workflow.","title":"Setup"},{"location":"eventsources/setup/resource/#list-options","text":"The Resource Event-Source allows to configure the list options through labels and field selectors for setting up a watch on objects. In the example above, we had set up the list option as follows, filter : # labels and filters are meant to provide K8s API options to filter the object list that are being watched . # Please read https : // kubernetes . io / docs / concepts / overview / working - with - objects / labels / # api for more details . # labels provide listing options to K8s API to watch objects labels : - key : app # Supported operations like == , != , etc . # Defaults to == . # Refer https : // kubernetes . io / docs / concepts / overview / working - with - objects / labels / # label - selectors for more info . # optional . operation : \"==\" value : my - workflow The key-operation-value items under the filter -> labels are used by the event-source to filter the objects that are eligible for the watch. So, in the present case, the event-source will set up a watch for those objects who have label \"app: my-workflow\". You can add more key-operation-value items to the list as per your use-case. 
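For instance, a watch constrained by two labels might look like this (the second key is hypothetical):

```yaml
filter:
  labels:
    - key: app
      operation: "=="
      value: my-workflow
    - key: environment      # hypothetical second requirement
      operation: "!="
      value: production
```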
Similarly, you can pass field selectors to the watch list options, e.g., filter : # labels and filters are meant to provide K8s API options to filter the object list that are being watched . # Please read https : // kubernetes . io / docs / concepts / overview / working - with - objects / labels / # api for more details . # fields provide listing options to K8s API to watch objects fields : - key : metadata . name # Supported operations like == , != , <= , >= etc . # Defaults to == . # Refer https : // kubernetes . io / docs / concepts / overview / working - with - objects / field - selectors / for more info . # optional . operation : == value : my - workflow Note: The label and fields under filter are used at the time of setting up the watch by the event-source. If you want to filter the objects based on the annotations or some other fields, use the Data Filters available in the sensor.","title":"List Options"},{"location":"eventsources/setup/resource/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/sftp/","text":"SFTP \u00b6 SFTP event-source polls an SFTP server to identify changes and helps sensor trigger workloads. Event Structure \u00b6 The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"name\" : \"Relative path to the file or directory\" , \"op\" : \"File operation that triggered the event\" // Create, Remove } } Specification \u00b6 SFTP event-source specification is available here . Setup \u00b6 Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/sftp.yaml The event source has configuration to poll the sftp server every 10 seconds for test-data directory and file(s) called x.txt . Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/sftp.yaml Log into the event-source pod by running following command. kubectl - n argo - events exec - it < event - source - pod - name > - c sftp - events -- / bin / bash Create a file called x.txt under test-data directory on the SFTP server. Once you create file x.txt , the sensor will trigger argo workflow. Run argo list to find the workflow. For real-world use cases, you should use PersistentVolumeClaim. 
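A minimal claim for the watched directory might look like this (name and size are illustrative; mounting it into the event-source pod is up to your pod spec):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: sftp-watch-data    # hypothetical name
  namespace: argo-events
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
```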
Troubleshoot \u00b6 Please read the FAQ .","title":"SFTP"},{"location":"eventsources/setup/sftp/#sftp","text":"SFTP event-source polls an SFTP server to identify changes and helps sensor trigger workloads.","title":"SFTP"},{"location":"eventsources/setup/sftp/#event-structure","text":"The structure of an event dispatched by the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"name\" : \"Relative path to the file or directory\" , \"op\" : \"File operation that triggered the event\" // Create, Remove } }","title":"Event Structure"},{"location":"eventsources/setup/sftp/#specification","text":"SFTP event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/sftp/#setup","text":"Create the event source by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/sftp.yaml The event source has configuration to poll the sftp server every 10 seconds for test-data directory and file(s) called x.txt . Create the sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/sftp.yaml Log into the event-source pod by running following command. kubectl - n argo - events exec - it < event - source - pod - name > - c sftp - events -- / bin / bash Create a file called x.txt under test-data directory on the SFTP server. Once you create file x.txt , the sensor will trigger argo workflow. Run argo list to find the workflow. For real-world use cases, you should use PersistentVolumeClaim.","title":"Setup"},{"location":"eventsources/setup/sftp/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"eventsources/setup/webhook/","text":"Webhook \u00b6 Webhook event-source exposes a http server and allows external entities to trigger workloads via http requests. Event Structure \u00b6 The structure of an event dispatched by the event-source to the sensor looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : { /* the headers from the request received by the event - source from the external entity */ }, \"body\" : { /* the payload of the request received by the event - source from the external entity */ }, } } Specification \u00b6 Webhook event-source specification is available here . Setup \u00b6 Install the event source in the argo-events namespace. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml The event-source pod is listening for HTTP requests on port 12000 and endpoint /example . It's time to create the sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/webhook.yaml Once the sensor pod is in running state, test the setup by sending a POST request to event-source service. 
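For example (the pod name and payload are illustrative):

```sh
# forward the event-source pod's port locally, then post a test payload
kubectl -n argo-events port-forward <event-source-pod-name> 12000:12000
curl -d '{"message": "hello"}' -H "Content-Type: application/json" \
  -X POST http://localhost:12000/example
```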
Troubleshoot \u00b6 Please read the FAQ .","title":"Webhook"},{"location":"eventsources/setup/webhook/#webhook","text":"Webhook event-source exposes a http server and allows external entities to trigger workloads via http requests.","title":"Webhook"},{"location":"eventsources/setup/webhook/#event-structure","text":"The structure of an event dispatched by the event-source to the sensor looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : { /* the headers from the request received by the event - source from the external entity */ }, \"body\" : { /* the payload of the request received by the event - source from the external entity */ }, } }","title":"Event Structure"},{"location":"eventsources/setup/webhook/#specification","text":"Webhook event-source specification is available here .","title":"Specification"},{"location":"eventsources/setup/webhook/#setup","text":"Install the event source in the argo-events namespace. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml The event-source pod is listening for HTTP requests on port 12000 and endpoint /example . It's time to create the sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/webhook.yaml Once the sensor pod is in running state, test the setup by sending a POST request to event-source service.","title":"Setup"},{"location":"eventsources/setup/webhook/#troubleshoot","text":"Please read the FAQ .","title":"Troubleshoot"},{"location":"sensors/ha/","text":"Sensor High Availability \u00b6 Sensor controller creates a k8s deployment (replica number defaults to 1) for each Sensor object. HA with Active-Passive strategy can be achieved by setting spec.replicas to a number greater than 1, which means only one Pod serves traffic and the rest ones stand by. One of standby Pods will be automatically elected to be active if the old one is gone. Please DO NOT manually scale up the replicas, that might cause unexpected behaviors! Kubernetes Leader Election \u00b6 By default, Argo Events will use NATS for the HA leader election except when using a Kafka Eventbus, in which case a leader election is not required as a Sensor that uses a Kafka EventBus is capable of horizontally scaling. If using a different EventBus you can opt-in to a Kubernetes native leader election by specifying the following annotation. annotations : events.argoproj.io/leader-election : k8s To use Kubernetes leader election the following RBAC rules need to be associated with the Sensor ServiceAccount. apiVersion : rbac.authorization.k8s.io/v1 kind : Role metadata : name : argo-events-leaderelection-role rules : - apiGroups : [ \"coordination.k8s.io\" ] resources : [ \"leases\" ] verbs : [ \"get\" , \"create\" , \"update\" ] More \u00b6 Click here to learn more information about Argo Events DR/HA recommendations.","title":"Sensor High Availability"},{"location":"sensors/ha/#sensor-high-availability","text":"Sensor controller creates a k8s deployment (replica number defaults to 1) for each Sensor object. 
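Concretely, opting into HA only requires raising spec.replicas; a trimmed Sensor sketch (name and dependency are illustrative):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Sensor
metadata:
  name: webhook-sensor     # illustrative name
spec:
  replicas: 2              # >1 enables active-passive HA
  dependencies:
    - name: test-dep
      eventSourceName: webhook
      eventName: example
  triggers: []             # triggers elided for brevity
```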
HA with Active-Passive strategy can be achieved by setting spec.replicas to a number greater than 1, which means only one Pod serves traffic and the rest ones stand by. One of standby Pods will be automatically elected to be active if the old one is gone. Please DO NOT manually scale up the replicas, that might cause unexpected behaviors!","title":"Sensor High Availability"},{"location":"sensors/ha/#kubernetes-leader-election","text":"By default, Argo Events will use NATS for the HA leader election except when using a Kafka Eventbus, in which case a leader election is not required as a Sensor that uses a Kafka EventBus is capable of horizontally scaling. If using a different EventBus you can opt-in to a Kubernetes native leader election by specifying the following annotation. annotations : events.argoproj.io/leader-election : k8s To use Kubernetes leader election the following RBAC rules need to be associated with the Sensor ServiceAccount. apiVersion : rbac.authorization.k8s.io/v1 kind : Role metadata : name : argo-events-leaderelection-role rules : - apiGroups : [ \"coordination.k8s.io\" ] resources : [ \"leases\" ] verbs : [ \"get\" , \"create\" , \"update\" ]","title":"Kubernetes Leader Election"},{"location":"sensors/ha/#more","text":"Click here to learn more information about Argo Events DR/HA recommendations.","title":"More"},{"location":"sensors/more-about-sensors-and-triggers/","text":"More About Sensors And Triggers \u00b6 Multiple Dependencies \u00b6 If there are multiple dependencies defined in the Sensor , you can configure Trigger Conditions to determine what kind of situation could get the trigger executed. For example, there are 2 dependencies A and B are defined, then condition A || B means an event from either A or B will execute the trigger. What happens if A && B is defined? Assume before B has an event b1 delivered, A has already got events a1 - a10 , in this case, a10 and b1 will be used to execute the trigger, and a1 - a9 will be dropped. In short, at the moment Trigger Conditions resolve to true, the latest events from each dependencies will be used to trigger the actions. Duplicate Dependencies \u00b6 Due to technical reasons when using the NATS Streaming bus, the same eventSourceName and eventName combo can not be referenced twice in one Sensor object. For example, the following dependency definitions are not allowed. However, it can be referenced unlimited times in different Sensor objects, so if you do have similar requirements, use 2 Sensor objects instead. spec : dependencies : - name : dep01 eventSourceName : webhook eventName : example filters : data : - path : body.value type : number comparator : \"<\" value : - \"20.0\" - name : dep02 eventSourceName : webhook eventName : example filters : data : - path : body.value type : number comparator : \">\" value : - \"50.0\" Note that this is not an issue for the Jetstream bus, however. Events Delivery Order \u00b6 Following statements are based on using NATS Streaming as the EventBus. In general, the order of events delivered to a Sensor is the order they were published, but there's no guarantee for that. There could be cases that the Sensor fails to acknowledge the first message, and then succeeds to acknowledge the second one before the first one is redelivered. Events Delivery Guarantee \u00b6 NATS Streaming offers at-least-once delivery guarantee. Jetstream has additional features that get closer to \"exactly once\". 
In addition, in the Sensor application, an in-memory cache is implemented to cache the events IDs delivered in the last 5 minutes: this is used to make sure there won't be any duplicate events delivered. Based on this, we are able to achieve 1) \"exactly once\" in almost all cases, with the exception of pods dying while processing messages, and 2) \"at least once\" in all cases. Trigger Retries \u00b6 By default, there's no retry for the trigger execution, this is based on the fact that Sensor has no idea if failure retry would bring any unexpected results. If you prefer to have retry for the trigger , add retryStrategy to the spec. spec : triggers : - template : name : http-trigger http : url : https://xxxxx.com/ method : GET retryStrategy : # Give up after this many times steps : 3 Or if you want more control on the retries: spec : triggers : - retryStrategy : # Give up after this many times steps : 3 # The initial duration, use strings like \"2s\", \"1m\" duration : 2s # Duration is multiplied by factor each retry, if factor is not zero # and steps limit has not been reached. # Should not be negative # # Defaults to \"1.0\" factor : 2.0 # The sleep between each retry is the duration plus an additional # amount chosen uniformly at random from the interval between # zero and `jitter * duration`. # # Defaults to \"1\" jitter : 2 Trigger Rate Limit \u00b6 There's no rate limit for a trigger unless you configure the spec as following: spec : triggers : - rateLimit : # Second, Minute or Hour, defaults to Second unit : Second # Requests per unit requestsPerUnit : 20 Revision History Limit \u00b6 Optionally, a revisionHistoryLimit may be configured in the spec as following: spec : # Optional revisionHistoryLimit : 3 Dead Letter Queue Trigger \u00b6 To help avoid data loss and dropping a message on failure after all the retries are exhausted, optionally, a dlqTrigger may be configured as following to invoke any of the 10+ triggers : spec : triggers : - template : name : http-trigger http : url : https://xxxxx.com/ method : GET # must be true for dlqTrigger atLeastOnce : true retryStrategy : steps : 3 dlqTrigger : template : name : dlq-http-trigger http : url : https://xxxxx.com/ method : PUT # must be true for dlqTrigger atLeastOnce : true # retries the dlqTrigger 5 times retryStrategy : steps : 5 If the trigger fails, it will retry up to the configured number of retries based on retryStrategy . If the maximum retries are reached and the trigger, the dlqTrigger will be invoked if specified. In order to use the dlqTrigger , the atLeastOnce must be set to true within the trigger and the dlqTrigger for the Sensor to know about the failure and invoke the dlqTrigger . note: dlqTrigger is only available for the top level trigger and not *recursively within the dlqTrigger template.","title":"More Information"},{"location":"sensors/more-about-sensors-and-triggers/#more-about-sensors-and-triggers","text":"","title":"More About Sensors And Triggers"},{"location":"sensors/more-about-sensors-and-triggers/#multiple-dependencies","text":"If there are multiple dependencies defined in the Sensor , you can configure Trigger Conditions to determine what kind of situation could get the trigger executed. For example, there are 2 dependencies A and B are defined, then condition A || B means an event from either A or B will execute the trigger. What happens if A && B is defined? 
Assume that before B has an event b1 delivered, A has already received events a1 - a10 ; in this case, a10 and b1 will be used to execute the trigger, and a1 - a9 will be dropped. In short, at the moment Trigger Conditions resolve to true, the latest events from each dependency will be used to trigger the actions.","title":"Multiple Dependencies"},{"location":"sensors/more-about-sensors-and-triggers/#duplicate-dependencies","text":"Due to technical reasons when using the NATS Streaming bus, the same eventSourceName and eventName combo cannot be referenced twice in one Sensor object. For example, the following dependency definitions are not allowed. However, it can be referenced unlimited times in different Sensor objects, so if you do have similar requirements, use 2 Sensor objects instead. spec : dependencies : - name : dep01 eventSourceName : webhook eventName : example filters : data : - path : body.value type : number comparator : \"<\" value : - \"20.0\" - name : dep02 eventSourceName : webhook eventName : example filters : data : - path : body.value type : number comparator : \">\" value : - \"50.0\" Note that this is not an issue for the Jetstream bus, however.","title":"Duplicate Dependencies"},{"location":"sensors/more-about-sensors-and-triggers/#events-delivery-order","text":"The following statements are based on using NATS Streaming as the EventBus. In general, the order of events delivered to a Sensor is the order they were published, but there's no guarantee for that. There could be cases where the Sensor fails to acknowledge the first message, and then succeeds in acknowledging the second one before the first one is redelivered.","title":"Events Delivery Order"},{"location":"sensors/more-about-sensors-and-triggers/#events-delivery-guarantee","text":"NATS Streaming offers an at-least-once delivery guarantee. Jetstream has additional features that get closer to \"exactly once\". In addition, in the Sensor application, an in-memory cache is implemented to cache the event IDs delivered in the last 5 minutes: this is used to make sure there won't be any duplicate events delivered. Based on this, we are able to achieve 1) \"exactly once\" in almost all cases, with the exception of pods dying while processing messages, and 2) \"at least once\" in all cases.","title":"Events Delivery Guarantee"},{"location":"sensors/more-about-sensors-and-triggers/#trigger-retries","text":"By default, there's no retry for the trigger execution; this is because the Sensor has no way of knowing whether retrying a failure would bring any unexpected results. If you prefer to have retries for the trigger , add retryStrategy to the spec. spec : triggers : - template : name : http-trigger http : url : https://xxxxx.com/ method : GET retryStrategy : # Give up after this many times steps : 3 Or if you want more control over the retries: spec : triggers : - retryStrategy : # Give up after this many times steps : 3 # The initial duration, use strings like \"2s\", \"1m\" duration : 2s # Duration is multiplied by factor each retry, if factor is not zero # and steps limit has not been reached. # Should not be negative # # Defaults to \"1.0\" factor : 2.0 # The sleep between each retry is the duration plus an additional # amount chosen uniformly at random from the interval between # zero and `jitter * duration`. 
# # Defaults to \"1\" jitter : 2","title":"Trigger Retries"},{"location":"sensors/more-about-sensors-and-triggers/#trigger-rate-limit","text":"There's no rate limit for a trigger unless you configure the spec as follows: spec : triggers : - rateLimit : # Second, Minute or Hour, defaults to Second unit : Second # Requests per unit requestsPerUnit : 20","title":"Trigger Rate Limit"},{"location":"sensors/more-about-sensors-and-triggers/#revision-history-limit","text":"Optionally, a revisionHistoryLimit may be configured in the spec as follows: spec : # Optional revisionHistoryLimit : 3","title":"Revision History Limit"},{"location":"sensors/more-about-sensors-and-triggers/#dead-letter-queue-trigger","text":"To help avoid data loss and dropping a message on failure after all the retries are exhausted, optionally, a dlqTrigger may be configured as follows to invoke any of the 10+ triggers : spec : triggers : - template : name : http-trigger http : url : https://xxxxx.com/ method : GET # must be true for dlqTrigger atLeastOnce : true retryStrategy : steps : 3 dlqTrigger : template : name : dlq-http-trigger http : url : https://xxxxx.com/ method : PUT # must be true for dlqTrigger atLeastOnce : true # retries the dlqTrigger 5 times retryStrategy : steps : 5 If the trigger fails, it will retry up to the configured number of retries based on retryStrategy . If the maximum retries are reached and the trigger still fails, the dlqTrigger will be invoked if specified. In order to use the dlqTrigger , atLeastOnce must be set to true within both the trigger and the dlqTrigger for the Sensor to know about the failure and invoke the dlqTrigger . Note: dlqTrigger is only available for the top-level trigger and not recursively within the dlqTrigger template.","title":"Dead Letter Queue Trigger"},{"location":"sensors/transform/","text":"Event Transformation \u00b6 Available after v1.6.0 Lua Script: Executes a user-defined Lua script to transform the event. JQ Command: Evaluates a JQ command to transform the event. We use https://github.com/itchyny/gojq to evaluate JQ commands. Note \u00b6 If set, transformations are applied to the event before the filters are applied. Either a Lua script or a JQ command can be used for the transformation, not both. Only event data is available for the transformation and not the context. The event is discarded if the transformation fails. Lua Script \u00b6 apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : webhook spec : template : serviceAccountName : operate-workflow-sa dependencies : - name : test-dep eventSourceName : webhook eventName : example transform : script : |- event.body.message='updated' return event triggers : - template : name : webhook-workflow-trigger conditions : \"test-dep\" k8s : operation : create source : resource : apiVersion : argoproj.io/v1alpha1 kind : Workflow metadata : generateName : webhook- spec : entrypoint : whalesay arguments : parameters : - name : message # the value will get overridden by event payload from test-dep value : hello world templates : - name : whalesay inputs : parameters : - name : message container : image : docker/whalesay:latest command : [ cowsay ] args : [ \"{{inputs.parameters.message}}\" ] parameters : - src : dependencyName : test-dep dataKey : body dest : spec.arguments.parameters.0.value The transform.script field defines the Lua script that gets executed when an event is received. The event data is available to the Lua execution context via a global variable called event . 
The above script sets the value of the body.message field within the event data to a new value, updated , and returns the event. The type of the event variable is Table and the script must return a Table representing a valid JSON object. JQ Command \u00b6 apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : webhook spec : template : serviceAccountName : operate-workflow-sa dependencies : - name : test-dep eventSourceName : webhook eventName : example transform : jq : \".body.message *= 2\" triggers : - template : name : webhook-workflow-trigger-1 conditions : \"test-dep-foo\" k8s : operation : create source : resource : apiVersion : argoproj.io/v1alpha1 kind : Workflow metadata : generateName : webhook- spec : entrypoint : whalesay arguments : parameters : - name : message # the value will get overridden by event payload from test-dep value : hello world templates : - name : whalesay inputs : parameters : - name : message container : image : docker/whalesay:latest command : [ cowsay ] args : [ \"{{inputs.parameters.message}}\" ] parameters : - src : dependencyName : test-dep dataKey : body dest : spec.arguments.parameters.0.value The above manifest applies the JQ command .body.message *= 2 on the event data, which appends the value of .body.message to itself and returns the event. The output of the transformation must be a valid JSON object.","title":"Event Transformation"},{"location":"sensors/transform/#event-transformation","text":"Available after v1.6.0 Lua Script: Executes a user-defined Lua script to transform the event. JQ Command: Evaluates a JQ command to transform the event. We use https://github.com/itchyny/gojq to evaluate JQ commands.","title":"Event Transformation"},{"location":"sensors/transform/#note","text":"If set, transformations are applied to the event before the filters are applied. Either a Lua script or a JQ command can be used for the transformation, not both. Only event data is available for the transformation and not the context. The event is discarded if the transformation fails.","title":"Note"},{"location":"sensors/transform/#lua-script","text":"apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : webhook spec : template : serviceAccountName : operate-workflow-sa dependencies : - name : test-dep eventSourceName : webhook eventName : example transform : script : |- event.body.message='updated' return event triggers : - template : name : webhook-workflow-trigger conditions : \"test-dep\" k8s : operation : create source : resource : apiVersion : argoproj.io/v1alpha1 kind : Workflow metadata : generateName : webhook- spec : entrypoint : whalesay arguments : parameters : - name : message # the value will get overridden by event payload from test-dep value : hello world templates : - name : whalesay inputs : parameters : - name : message container : image : docker/whalesay:latest command : [ cowsay ] args : [ \"{{inputs.parameters.message}}\" ] parameters : - src : dependencyName : test-dep dataKey : body dest : spec.arguments.parameters.0.value The transform.script field defines the Lua script that gets executed when an event is received. The event data is available to the Lua execution context via a global variable called event . The above script sets the value of the body.message field within the event data to a new value, updated , and returns the event. 
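As a further illustrative sketch (the processed field name below is hypothetical, not part of the examples above), a Lua transform can also add a field instead of overwriting one:

transform:
  script: |-
    -- event is the global Table holding the event data (assumed webhook payload)
    -- add a flag next to the original message instead of replacing it
    event.body.processed = true
    return event
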
The type of the event variable is Table and the script must return a Table representing a valid JSON object.","title":"Lua Script"},{"location":"sensors/transform/#jq-command","text":"apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : webhook spec : template : serviceAccountName : operate-workflow-sa dependencies : - name : test-dep eventSourceName : webhook eventName : example transform : jq : \".body.message *= 2\" triggers : - template : name : webhook-workflow-trigger-1 conditions : \"test-dep-foo\" k8s : operation : create source : resource : apiVersion : argoproj.io/v1alpha1 kind : Workflow metadata : generateName : webhook- spec : entrypoint : whalesay arguments : parameters : - name : message # the value will get overridden by event payload from test-dep value : hello world templates : - name : whalesay inputs : parameters : - name : message container : image : docker/whalesay:latest command : [ cowsay ] args : [ \"{{inputs.parameters.message}}\" ] parameters : - src : dependencyName : test-dep dataKey : body dest : spec.arguments.parameters.0.value The above manifest applies the JQ command .body.message *= 2 on the event data, which appends the value of .body.message to itself and returns the event. The output of the transformation must be a valid JSON object.","title":"JQ Command"},{"location":"sensors/trigger-conditions/","text":"Trigger Conditions \u00b6 v1.0 and after Triggers can be executed based on different dependency conditions . An example with conditions : apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : example spec : dependencies : - name : dep01 eventSourceName : webhook-a eventName : example01 - name : dep02 eventSourceName : webhook-a eventName : example02 - name : dep03 eventSourceName : webhook-b eventName : example03 triggers : - template : conditions : \"dep02\" name : trigger01 http : url : http://abc.com/hello1 method : GET - template : conditions : \"dep02 && dep03\" name : trigger02 http : url : http://abc.com/hello2 method : GET - template : conditions : \"(dep01 || dep02) && dep03\" name : trigger03 http : url : http://abc.com/hello3 method : GET Conditions is a boolean expression containing dependency names; the trigger won't be executed until the expression resolves to true. The operators in conditions include: && || Triggers Without Conditions \u00b6 If conditions is missing, the default condition to execute the trigger is the && logic of all the defined dependencies. Conditions Reset \u00b6 When multiple dependencies are defined for a trigger, the trigger won't be executed until the condition expression is resolved to true . Sometimes you might want to reset all the stakeholders of the conditions; conditions reset is the way to do it. For example, your trigger has a condition A && B , and both A and B are expected to have an event every day. One day, for some reason, A gets an event but B doesn't; this ends up with today's A and tomorrow's B triggering an action, which might not be something you want. 
To avoid that, you can reset the conditions as follows: spec : triggers : - template : conditions : \"dep01 && dep02\" conditionsReset : - byTime : # Reset conditions at 23:59 cron : \"59 23 * * *\" # Optional, defaults to UTC # More info for timezone: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones timezone : America/Los_Angeles name : trigger01","title":"Trigger Conditions"},{"location":"sensors/trigger-conditions/#trigger-conditions","text":"v1.0 and after Triggers can be executed based on different dependency conditions . An example with conditions : apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : example spec : dependencies : - name : dep01 eventSourceName : webhook-a eventName : example01 - name : dep02 eventSourceName : webhook-a eventName : example02 - name : dep03 eventSourceName : webhook-b eventName : example03 triggers : - template : conditions : \"dep02\" name : trigger01 http : url : http://abc.com/hello1 method : GET - template : conditions : \"dep02 && dep03\" name : trigger02 http : url : http://abc.com/hello2 method : GET - template : conditions : \"(dep01 || dep02) && dep03\" name : trigger03 http : url : http://abc.com/hello3 method : GET Conditions is a boolean expression containing dependency names; the trigger won't be executed until the expression resolves to true. The operators in conditions include: && ||","title":"Trigger Conditions"},{"location":"sensors/trigger-conditions/#triggers-without-conditions","text":"If conditions is missing, the default condition to execute the trigger is the && logic of all the defined dependencies.","title":"Triggers Without Conditions"},{"location":"sensors/trigger-conditions/#conditions-reset","text":"When multiple dependencies are defined for a trigger, the trigger won't be executed until the condition expression is resolved to true . Sometimes you might want to reset all the stakeholders of the conditions; conditions reset is the way to do it. For example, your trigger has a condition A && B , and both A and B are expected to have an event every day. One day, for some reason, A gets an event but B doesn't; this ends up with today's A and tomorrow's B triggering an action, which might not be something you want. To avoid that, you can reset the conditions as follows: spec : triggers : - template : conditions : \"dep01 && dep02\" conditionsReset : - byTime : # Reset conditions at 23:59 cron : \"59 23 * * *\" # Optional, defaults to UTC # More info for timezone: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones timezone : America/Los_Angeles name : trigger01","title":"Conditions Reset"},{"location":"sensors/filters/ctx/","text":"Context Filter \u00b6 Context filter is applied to the event context. A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } } Fields \u00b6 Context filter has the following fields: filters : context : type : event_type subject : event_subject source : event_source datacontenttype : event_data_content_type You can also specify id, specversion and time fields in the YAML manifest, but they are ignored in filtering. 
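For instance, a hedged sketch of a context filter that matches on both type and source (the values here are illustrative, not taken from the examples above):

filters:
  context:
    # the event is valid only if both context fields match (assumed values)
    type: webhook
    source: custom-webhook
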
Note It is usually not useful to build a context filter based on datacontenttype , source and subject , as currently they come fixed from the event-source: datacontenttype is always application/json source corresponds to eventSourceName specified in the Sensor YAML manifest subject corresponds to eventName specified in the Sensor YAML manifest How it works \u00b6 Specify one or more of the available context fields: apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : with-ctx-filter spec : template : serviceAccountName : operate-workflow-sa dependencies : - name : test-dep eventSourceName : webhook eventName : example filters : context : source : custom-webhook Practical example \u00b6 Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with context filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-context.yaml Send an HTTP request to the event-source curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice in the sensor logs that the event is invalid, as the sensor expects custom-webhook as the value of the source Further examples \u00b6 You can find some examples here .","title":"Context Filter"},{"location":"sensors/filters/ctx/#context-filter","text":"Context filter is applied to the event context. A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } }","title":"Context Filter"},{"location":"sensors/filters/ctx/#fields","text":"Context filter has the following fields: filters : context : type : event_type subject : event_subject source : event_source datacontenttype : event_data_content_type You can also specify id, specversion and time fields in the YAML manifest, but they are ignored in filtering. 
Note It is usually not useful to build a context filter based on datacontenttype , source and subject , as currently they come fixed from the event-source: datacontenttype is always application/json source corresponds to eventSourceName specified in the Sensor YAML manifest subject corresponds to eventName specified in the Sensor YAML manifest","title":"Fields"},{"location":"sensors/filters/ctx/#how-it-works","text":"Specify one or more of the available context fields: apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : with-ctx-filter spec : template : serviceAccountName : operate-workflow-sa dependencies : - name : test-dep eventSourceName : webhook eventName : example filters : context : source : custom-webhook","title":"How it works"},{"location":"sensors/filters/ctx/#practical-example","text":"Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with context filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-context.yaml Send an HTTP request to the event-source curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice in the sensor logs that the event is invalid, as the sensor expects custom-webhook as the value of the source","title":"Practical example"},{"location":"sensors/filters/ctx/#further-examples","text":"You can find some examples here .","title":"Further examples"},{"location":"sensors/filters/data/","text":"Data Filter \u00b6 Data filters are applied to the event data. A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } } Data filters are applied on data within the payload. Fields \u00b6 A data filter has the following fields: filters : dataLogicalOperator : logical_operator_applied data : - path : path_within_event_data type : types_of_the_data comparator : numeric_comparator value : - list_of_possible_values \u26a0\ufe0f PLEASE NOTE the order in which data filters are declared corresponds to the order in which the Sensor will evaluate them. Logical operator \u00b6 Data filters can be evaluated together in 2 ways: and , meaning that all data filters returning true are required for an event to be valid or , meaning that only one data filter returning true is enough for an event to be valid Any kind of error is considered as false (e.g. path not existing in event body). Such behaviour can be configured with the dataLogicalOperator field in a Sensor dependency's filters, e.g. apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : data-filters-example spec : dependencies : - name : sample-dependency eventSourceName : webhook eventName : sample-event filters : dataLogicalOperator : \"or\" data : - path : \"a\" type : \"bool\" value : - \"true\" - path : \"b.c\" type : \"number\" value : - \"3.14\" - path : \"b.d\" type : \"string\" value : - \"hello there\" # ... Available values: \"\" (empty), defaulting to and and , default behaviour or \u26a0\ufe0f PLEASE NOTE Data logical operator values must be lower case . 
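To make the or behaviour above concrete, consider a hedged example payload (values illustrative):

{ "a": false, "b": { "c": 3.14, "d": "bye" } }

Only the b.c number filter matches here, but under dataLogicalOperator: "or" that single match is enough for the event to be valid; under the default and it would be rejected.
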
How it works \u00b6 Comparator \u00b6 The data filter offers the following comparators : >= > = != < <= e.g. filters : data : - path : body.value type : number comparator : \">\" value : - \"50.0\" Note : If the data type is string , you can pass either an exact value or a regex. In any case the value will be evaluated as a regex. If the data type is bool or float , you have to pass an exact value. Multiple paths \u00b6 If the HTTP request is more complex and contains multiple paths that you would like to filter against, you can use multipaths to combine multiple data paths in the payload into one string. For a given payload such as: { \"body\" : { \"action\" : \"opened\" , \"labels\" : [ { \"id\" : \"1234\" , \"name\" : \"Webhook\" }, { \"id\" : \"5678\" , \"name\" : \"Approved\" } ] } } We want our sensor to fire if the action is \"opened\" and it has a label of \"Webhook\" or if the action is \"closed\" and it has a label of \"Webhook\" and \"Approved\". The path would look like body.action,body.labels.#(name==\"Webhook\").name,body.labels.#(name==\"Approved\").name This would return a string like: \"opened\",\"Webhook\" or \"closed\",\"Webhook\",\"Approved\" . As the resulting data type will be a string , we can pass a regex over it: filters : data : - path : 'body.action,body.labels.#(name==\"Webhook\").name,body.labels.#(name==\"Approved\").name' type : string value : - '\"opened\",\"Webhook\"' - '\"closed\",\"Webhook\",\"Approved\"' Template \u00b6 template processes the incoming data defined in path through a sprig template before matching with the value . e.g. filters : data : - path : body.message type : string value : - \"hello world\" template : \"{{ b64dec .Input }}\" The message '{\"message\":\"aGVsbG8gd29ybGQ=\"}' will match the above filter definition. Note : The data type is assumed to be string before applying the template , then cast to the user-defined type for value matching. Practical examples (comparator) \u00b6 Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with data filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-data-simple-1.yaml Send an HTTP request to the event-source curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice in the sensor logs that the event is invalid, as it expects either hello or hey as the value of body.message Send another HTTP request to the event-source curl -d '{\"message\":\"hello\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Look for a workflow with a name starting with data-workflow- Further examples \u00b6 You can find some examples here .","title":"Data Filter"},{"location":"sensors/filters/data/#data-filter","text":"Data filters are applied to the event data. 
A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } } Data filters are applied on data within the payload.","title":"Data Filter"},{"location":"sensors/filters/data/#fields","text":"A data filter has the following fields: filters : dataLogicalOperator : logical_operator_applied data : - path : path_within_event_data type : types_of_the_data comparator : numeric_comparator value : - list_of_possible_values \u26a0\ufe0f PLEASE NOTE the order in which data filters are declared corresponds to the order in which the Sensor will evaluate them.","title":"Fields"},{"location":"sensors/filters/data/#logical-operator","text":"Data filters can be evaluated together in 2 ways: and , meaning that all data filters returning true are required for an event to be valid or , meaning that only one data filter returning true is enough for an event to be valid Any kind of error is considered as false (e.g. path not existing in event body). Such behaviour can be configured with the dataLogicalOperator field in a Sensor dependency's filters, e.g. apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : data-filters-example spec : dependencies : - name : sample-dependency eventSourceName : webhook eventName : sample-event filters : dataLogicalOperator : \"or\" data : - path : \"a\" type : \"bool\" value : - \"true\" - path : \"b.c\" type : \"number\" value : - \"3.14\" - path : \"b.d\" type : \"string\" value : - \"hello there\" # ... Available values: \"\" (empty), defaulting to and and , default behaviour or \u26a0\ufe0f PLEASE NOTE Data logical operator values must be lower case .","title":"Logical operator"},{"location":"sensors/filters/data/#how-it-works","text":"","title":"How it works"},{"location":"sensors/filters/data/#comparator","text":"The data filter offers the following comparators : >= > = != < <= e.g. filters : data : - path : body.value type : number comparator : \">\" value : - \"50.0\" Note : If the data type is string , you can pass either an exact value or a regex. In any case the value will be evaluated as a regex. If the data type is bool or float , you have to pass an exact value.","title":"Comparator"},{"location":"sensors/filters/data/#multiple-paths","text":"If the HTTP request is more complex and contains multiple paths that you would like to filter against, you can use multipaths to combine multiple data paths in the payload into one string. For a given payload such as: { \"body\" : { \"action\" : \"opened\" , \"labels\" : [ { \"id\" : \"1234\" , \"name\" : \"Webhook\" }, { \"id\" : \"5678\" , \"name\" : \"Approved\" } ] } } We want our sensor to fire if the action is \"opened\" and it has a label of \"Webhook\" or if the action is \"closed\" and it has a label of \"Webhook\" and \"Approved\". 
The path would look like body.action,body.labels.#(name==\"Webhook\").name,body.labels.#(name==\"Approved\").name This would return a string like: \"opened\",\"Webhook\" or \"closed\",\"Webhook\",\"Approved\" . As the resulting data type will be a string , we can pass a regex over it: filters : data : - path : 'body.action,body.labels.#(name==\"Webhook\").name,body.labels.#(name==\"Approved\").name' type : string value : - '\"opened\",\"Webhook\"' - '\"closed\",\"Webhook\",\"Approved\"'","title":"Multiple paths"},{"location":"sensors/filters/data/#template","text":"template processes the incoming data defined in path through a sprig template before matching with the value . e.g. filters : data : - path : body.message type : string value : - \"hello world\" template : \"{{ b64dec .Input }}\" The message '{\"message\":\"aGVsbG8gd29ybGQ=\"}' will match the above filter definition. Note : The data type is assumed to be string before applying the template , then cast to the user-defined type for value matching.","title":"Template"},{"location":"sensors/filters/data/#practical-examples-comparator","text":"Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with data filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-data-simple-1.yaml Send an HTTP request to the event-source curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice in the sensor logs that the event is invalid, as it expects either hello or hey as the value of body.message Send another HTTP request to the event-source curl -d '{\"message\":\"hello\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Look for a workflow with a name starting with data-workflow-","title":"Practical examples (comparator)"},{"location":"sensors/filters/data/#further-examples","text":"You can find some examples here .","title":"Further examples"},{"location":"sensors/filters/expr/","text":"Expr filter \u00b6 Expr filters are applied to the event data. A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } } Expr filters are applied on data within the payload. Fields \u00b6 An expr filter has the following fields: filters : exprLogicalOperator : logical_operator_applied exprs : - expr : expression_to_evaluate fields : - name : parameter_name path : path_to_parameter_value \u26a0\ufe0f PLEASE NOTE the order in which expr filters are declared corresponds to the order in which the Sensor will evaluate them. Logical operator \u00b6 Expr filters can be evaluated together in 2 ways: and , meaning that all expr filters returning true are required for an event to be valid or , meaning that only one expr filter returning true is enough for an event to be valid Any kind of error is considered as false (e.g. path not existing in event body). Such behaviour can be configured with the exprLogicalOperator field in a Sensor dependency's filters, e.g. 
apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : data-filters-example spec : dependencies : - name : sample-dependency eventSourceName : webhook eventName : sample-event filters : exprLogicalOperator : \"or\" exprs : - expr : a == \"b\" || c != 10 fields : - name : a path : a - name : c path : c - expr : e == false fields : - name : e path : d.e # ... Available values: \"\" (empty), defaulting to and and , default behaviour or \u26a0\ufe0f PLEASE NOTE Expr logical operator values must be lower case . How it works \u00b6 The expr field defines the expression to be evaluated. The fields stanza defines the name and path of each parameter used in the expression. name is arbitrary and used in the expr ; path defines how to find the value in the data payload that is then assigned to the parameter. The expr filter evaluates the expression contained in expr using govaluate . This library provides remarkable flexibility and power. With govaluate we are able to define complex combinations of arithmetic ( - , * , / , ** , % ), negation ( - ), inversion ( ! ), bitwise not ( ~ ), logical ( && , || ), ternary conditional ( ? , : ) operators, together with comparators ( > , < , >= , <= ), comma-separated arrays and custom functions. Here are some examples: action =~ \"start\" action == \"end\" && started == true action =~ \"start\" || (started == true && instances == 2) To discover all options offered by govaluate, take a look at its manual . Practical example \u00b6 Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with expr filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-expressions.yaml Send an HTTP request to the event-source curl -d '{ \"a\": \"b\", \"c\": 11, \"d\": { \"e\": true } }' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice in the sensor logs that the event is invalid, as the sensor expects e == false Send another HTTP request to the event-source curl -d '{ \"a\": \"b\", \"c\": 11, \"d\": { \"e\": false } }' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Look for a workflow with a name starting with expr-workflow- Further examples \u00b6 You can find some examples here .","title":"Expr filter"},{"location":"sensors/filters/expr/#expr-filter","text":"Expr filters are applied to the event data. 
A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } } Expr filters are applied on data within the payload.","title":"Expr filter"},{"location":"sensors/filters/expr/#fields","text":"An expr filter has the following fields: filters : exprLogicalOperator : logical_operator_applied exprs : - expr : expression_to_evaluate fields : - name : parameter_name path : path_to_parameter_value \u26a0\ufe0f PLEASE NOTE the order in which expr filters are declared corresponds to the order in which the Sensor will evaluate them.","title":"Fields"},{"location":"sensors/filters/expr/#logical-operator","text":"Expr filters can be evaluated together in 2 ways: and , meaning that all expr filters returning true are required for an event to be valid or , meaning that only one expr filter returning true is enough for an event to be valid Any kind of error is considered as false (e.g. path not existing in event body). Such behaviour can be configured with the exprLogicalOperator field in a Sensor dependency's filters, e.g. apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : data-filters-example spec : dependencies : - name : sample-dependency eventSourceName : webhook eventName : sample-event filters : exprLogicalOperator : \"or\" exprs : - expr : a == \"b\" || c != 10 fields : - name : a path : a - name : c path : c - expr : e == false fields : - name : e path : d.e # ... Available values: \"\" (empty), defaulting to and and , default behaviour or \u26a0\ufe0f PLEASE NOTE Expr logical operator values must be lower case .","title":"Logical operator"},{"location":"sensors/filters/expr/#how-it-works","text":"The expr field defines the expression to be evaluated. The fields stanza defines the name and path of each parameter used in the expression. name is arbitrary and used in the expr ; path defines how to find the value in the data payload that is then assigned to the parameter. The expr filter evaluates the expression contained in expr using govaluate . This library provides remarkable flexibility and power. With govaluate we are able to define complex combinations of arithmetic ( - , * , / , ** , % ), negation ( - ), inversion ( ! ), bitwise not ( ~ ), logical ( && , || ), ternary conditional ( ? , : ) operators, together with comparators ( > , < , >= , <= ), comma-separated arrays and custom functions. 
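Before the raw expression examples that follow, here is a hedged sketch of how such an expression sits inside a Sensor dependency (the paths body.action and body.instances are assumed for illustration):

filters:
  exprs:
    - expr: action =~ "start" && instances == 2
      fields:
        # bind each expression parameter name to a path in the event data
        - name: action
          path: body.action
        - name: instances
          path: body.instances
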
Here are some examples: action =~ \"start\" action == \"end\" && started == true action =~ \"start\" || (started == true && instances == 2) To discover all options offered by govaluate, take a look at its manual .","title":"How it works"},{"location":"sensors/filters/expr/#practical-example","text":"Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with expr filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-expressions.yaml Send an HTTP request to the event-source curl -d '{ \"a\": \"b\", \"c\": 11, \"d\": { \"e\": true } }' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice in the sensor logs that the event is invalid, as the sensor expects e == false Send another HTTP request to the event-source curl -d '{ \"a\": \"b\", \"c\": 11, \"d\": { \"e\": false } }' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Look for a workflow with a name starting with expr-workflow-","title":"Practical example"},{"location":"sensors/filters/expr/#further-examples","text":"You can find some examples here .","title":"Further examples"},{"location":"sensors/filters/intro/","text":"Introduction \u00b6 Filters provide a powerful mechanism to apply constraints on the events in order to determine their validity. If filters determine an event is valid, this will trigger the action defined by the Sensor. If filters determine an event is not valid, this won't trigger any action. Types \u00b6 Argo Events offers 5 types of filters: Expr Filter Data Filter Script Filter Context Filter Time Filter \u26a0\ufe0f PLEASE NOTE this is the order in which the Sensor evaluates filter types: expr, data, context, time. Logical operator \u00b6 Filter types can be evaluated together in 2 ways: and , meaning that all filters returning true are required for an event to be valid or , meaning that only one filter returning true is enough for an event to be valid Any kind of filter error is considered as false (e.g. path not existing in event body). Such behaviour can be configured with the filtersLogicalOperator field in a Sensor dependency, e.g. apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : multiple-filters-example spec : dependencies : - name : sample-dependency eventSourceName : webhook eventName : sample-event filtersLogicalOperator : \"or\" filters : # ... Available values: \"\" (empty), defaulting to and and , default behaviour or \u26a0\ufe0f PLEASE NOTE Logical operator values must be lower case . Examples \u00b6 You can find some examples here .","title":"Introduction"},{"location":"sensors/filters/intro/#introduction","text":"Filters provide a powerful mechanism to apply constraints on the events in order to determine their validity. If filters determine an event is valid, this will trigger the action defined by the Sensor. 
If filters determine an event is not valid, this won't trigger any action.","title":"Introduction"},{"location":"sensors/filters/intro/#types","text":"Argo Events offers 5 types of filters: Expr Filter Data Filter Script Filter Context Filter Time Filter \u26a0\ufe0f PLEASE NOTE this is the order in which the Sensor evaluates filter types: expr, data, context, time.","title":"Types"},{"location":"sensors/filters/intro/#logical-operator","text":"Filter types can be evaluated together in 2 ways: and , meaning that all filters returning true are required for an event to be valid or , meaning that only one filter returning true is enough for an event to be valid Any kind of filter error is considered as false (e.g. path not existing in event body). Such behaviour can be configured with the filtersLogicalOperator field in a Sensor dependency, e.g. apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : multiple-filters-example spec : dependencies : - name : sample-dependency eventSourceName : webhook eventName : sample-event filtersLogicalOperator : \"or\" filters : # ... Available values: \"\" (empty), defaulting to and and , default behaviour or \u26a0\ufe0f PLEASE NOTE Logical operator values must be lower case .","title":"Logical operator"},{"location":"sensors/filters/intro/#examples","text":"You can find some examples here .","title":"Examples"},{"location":"sensors/filters/script/","text":"Script filter \u00b6 Script filters can be used to filter the events with Lua scripts. Script filters are applied to the event data . A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {} } } Fields \u00b6 A Script filter can be defined under filters with a field script : filters : script : |- if event.body.a == \"b\" and event.body.d.e == \"z\" then return true else return false end Practical example \u00b6 Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with script filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-script.yaml Send an HTTP request to the event-source kubectl port-forward svc/webhook-eventsource-svc 12000 curl -d '{\"hello\": \"world\"}' -X POST http://localhost:12000/example You will notice in the sensor logs that the event did not trigger anything. Send another HTTP request to the event-source curl -X POST -d '{\"a\": \"b\", \"d\": {\"e\": \"z\"}}' http://localhost:12000/example Then you will see the event successfully triggered a workflow creation.","title":"Script filter"},{"location":"sensors/filters/script/#script-filter","text":"Script filters can be used to filter the events with Lua scripts. Script filters are applied to the event data . 
A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {} } }","title":"Script filter"},{"location":"sensors/filters/script/#fields","text":"A Script filter can be defined under filters with a field script : filters : script : |- if event.body.a == \"b\" and event.body.d.e == \"z\" then return true else return false end","title":"Fields"},{"location":"sensors/filters/script/#practical-example","text":"Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with script filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-script.yaml Send an HTTP request to the event-source kubectl port-forward svc/webhook-eventsource-svc 12000 curl -d '{\"hello\": \"world\"}' -X POST http://localhost:12000/example You will notice in the sensor logs that the event did not trigger anything. Send another HTTP request to the event-source curl -X POST -d '{\"a\": \"b\", \"d\": {\"e\": \"z\"}}' http://localhost:12000/example Then you will see the event successfully triggered a workflow creation.","title":"Practical example"},{"location":"sensors/filters/time/","text":"Time Filter \u00b6 Time filter is applied to the event time, contained in the event context. A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } } It filters out events occurring outside the specified time range, so it is especially helpful when you need to make sure an event occurs within a certain time-frame. Fields \u00b6 Time filter has the following fields: filters : time : start : time_range_start_utc stop : time_range_end_utc How it works \u00b6 Time filter takes a start and stop time in HH:MM:SS format in UTC. apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : with-time-filter spec : template : serviceAccountName : operate-workflow-sa dependencies : - name : test-dep eventSourceName : webhook eventName : example filters : time : start : \"02:30:00\" stop : \"04:30:00\" If stop is smaller than start ( stop < start ), the stop time is treated as the next day of start . Note : start is inclusive while stop is exclusive. Time filter behaviour visually explained \u00b6 if start < stop : event time must be in [start, stop) . 
00:00:00 00:00:00 00:00:00 \u2503 start stop \u2503 start stop \u2503 \u2500\u2538\u2500\u2500\u2500\u2500\u2500\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cb\u2500\u2500\u2500\u2500\u2500\u2538\u2500\u2500\u2500\u2500\u2500\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cb\u2500\u2500\u2500\u2500\u2500\u2538\u2500 \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 OK \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 OK \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f if stop < start : event time must be in [start, stop@Next day) (this is equivalent to: event time must be in [00:00:00, stop) || [start, 00:00:00@Next day) ). 00:00:00 00:00:00 00:00:00 \u2503 stop start \u2503 stop start \u2503 \u2500\u2538\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cb\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2538\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cb\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2538\u2500 \u2500\u2500\u2500 OK \u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 OK \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2570\u2500\u2500\u2500\u2500\u2500\u2500 OK \u2500\u2500\u2500 Practical example \u00b6 Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with time filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-time.yaml Send an HTTP request to event-source curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice one of following behaviours: if you run this example between 02:30 and 04:30, the sensor logs the event is valid if you run this example outside time range between 02:30 and 04:30, the sensor logs the event is invalid Further examples \u00b6 You can find some examples here .","title":"Time Filter"},{"location":"sensors/filters/time/#time-filter","text":"Time filter is applied to the event time, contained in the event context. 
A CloudEvent from Webhook event-source has payload structure as: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } } It filters out events occurring outside the specified time range, so it is specially helpful when you need to make sure an event occurs between a certain time-frame.","title":"Time Filter"},{"location":"sensors/filters/time/#fields","text":"Time filter has following fields: filters : time : start : time_range_start_utc stop : time_range_end_utc","title":"Fields"},{"location":"sensors/filters/time/#how-it-works","text":"Time filter takes a start and stop time in HH:MM:SS format in UTC. apiVersion : argoproj.io/v1alpha1 kind : Sensor metadata : name : with-time-filter spec : template : serviceAccountName : operate-workflow-sa dependencies : - name : test-dep eventSourceName : webhook eventName : example filters : time : start : \"02:30:00\" stop : \"04:30:00\" If stop is smaller than start ( stop < start ), the stop time is treated as next day of start . Note : start is inclusive while stop is exclusive.","title":"How it works"},{"location":"sensors/filters/time/#time-filter-behaviour-visually-explained","text":"if start < stop : event time must be in [start, stop) . 00:00:00 00:00:00 00:00:00 \u2503 start stop \u2503 start stop \u2503 \u2500\u2538\u2500\u2500\u2500\u2500\u2500\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cb\u2500\u2500\u2500\u2500\u2500\u2538\u2500\u2500\u2500\u2500\u2500\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cb\u2500\u2500\u2500\u2500\u2500\u2538\u2500 \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 OK \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 OK \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f if stop < start : event time must be in [start, stop@Next day) (this is equivalent to: event time must be in [00:00:00, stop) || [start, 00:00:00@Next day) ). 
00:00:00 00:00:00 00:00:00 \u2503 stop start \u2503 stop start \u2503 \u2500\u2538\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cb\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2538\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cb\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2538\u2500 \u2500\u2500\u2500 OK \u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 OK \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2570\u2500\u2500\u2500\u2500\u2500\u2500 OK \u2500\u2500\u2500","title":"Time filter behaviour visually explained"},{"location":"sensors/filters/time/#practical-example","text":"Create a webhook event-source kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a webhook sensor with time filter kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-time.yaml Send an HTTP request to the event-source curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice one of the following behaviours: if you run this example between 02:30 and 04:30, the sensor logs the event is valid if you run this example outside the time range between 02:30 and 04:30, the sensor logs the event is invalid","title":"Practical example"},{"location":"sensors/filters/time/#further-examples","text":"You can find some examples here .","title":"Further examples"},{"location":"sensors/triggers/argo-workflow/","text":"Argo Workflow Trigger \u00b6 Argo Workflow is a K8s custom resource which helps orchestrate parallel jobs on Kubernetes. Trigger a workflow \u00b6 Note: You will need to have Argo Workflows installed to make this work. Make sure to have the eventbus deployed in the namespace. We will use a webhook event-source and sensor to trigger an Argo workflow. Set up the operate-workflow-sa service account that the sensor will use kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/rbac/sensor-rbac.yaml Let's set up a webhook event-source to process incoming requests. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create the sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/special-workflow-trigger-shortened.yaml Let's expose the webhook event-source pod using port-forward so that we can make a request to it. kubectl -n argo-events port-forward 12000:12000 Use either Curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example List the workflow using argo list . Parameterization \u00b6 Similar to other types of triggers, the sensor offers parameterization for the Argo workflow trigger. Parameterization is especially useful when you want to define a generic trigger template in the sensor and populate the workflow object values on the fly. You can learn more about trigger parameterization here . 
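As an illustrative sketch of such parameterization (mirroring the parameters pattern used in the transform examples earlier; the dataKey here is assumed), a k8s trigger can override a workflow argument with a value from the event body:

triggers:
  - template:
      name: webhook-workflow-trigger
      k8s:
        operation: create
        source:
          resource: {} # the Workflow manifest goes here
        parameters:
          - src:
              dependencyName: test-dep
              dataKey: body.message
            dest: spec.arguments.parameters.0.value
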
Policy \u00b6 Trigger policy helps you determine the status of the triggered Argo workflow object and decide whether to stop or continue the sensor. Take a look at K8s Trigger Policy . Argo CLI \u00b6 In addition to the example above, you can leverage other functionalities provided by the Argo CLI such as: Submit Submit --from Resubmit Resume Retry Suspend Terminate Stop To make use of Argo CLI operations in the argoWorkflow trigger template: argoWorkflow: operation: submit # submit, submit-from, resubmit, resume, retry, suspend, terminate or stop A complete example is available here .","title":"Argo Workflow Trigger"},{"location":"sensors/triggers/argo-workflow/#argo-workflow-trigger","text":"Argo Workflow is a K8s custom resource which helps orchestrate parallel jobs on Kubernetes.","title":"Argo Workflow Trigger"},{"location":"sensors/triggers/argo-workflow/#trigger-a-workflow","text":"Note: You will need to have Argo Workflows installed to make this work. Make sure to have the eventbus deployed in the namespace. We will use a webhook event-source and sensor to trigger an Argo workflow. Set up the operate-workflow-sa service account that the sensor will use kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/rbac/sensor-rbac.yaml Let's set up a webhook event-source to process incoming requests. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create the sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/special-workflow-trigger-shortened.yaml Let's expose the webhook event-source pod using port-forward so that we can make a request to it. kubectl -n argo-events port-forward 12000:12000 Use either Curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example List the workflow using argo list .","title":"Trigger a workflow"},{"location":"sensors/triggers/argo-workflow/#parameterization","text":"Similar to other types of triggers, the sensor offers parameterization for the Argo workflow trigger. Parameterization is especially useful when you want to define a generic trigger template in the sensor and populate the workflow object values on the fly. You can learn more about trigger parameterization here .","title":"Parameterization"},{"location":"sensors/triggers/argo-workflow/#policy","text":"Trigger policy helps you determine the status of the triggered Argo workflow object and decide whether to stop or continue the sensor. Take a look at K8s Trigger Policy .","title":"Policy"},{"location":"sensors/triggers/argo-workflow/#argo-cli","text":"In addition to the example above, you can leverage other functionalities provided by the Argo CLI such as: Submit Submit --from Resubmit Resume Retry Suspend Terminate Stop To make use of Argo CLI operations in the argoWorkflow trigger template: argoWorkflow: operation: submit # submit, submit-from, resubmit, resume, retry, suspend, terminate or stop A complete example is available here .","title":"Argo CLI"},{"location":"sensors/triggers/aws-lambda/","text":"AWS Lambda \u00b6 AWS Lambda provides tremendous value, but event-driven lambda invocation is limited to SNS, SQS and a few other event sources. Argo Events makes it easy to integrate lambda with event sources that are not native to AWS. 
Trigger A Simple Lambda \u00b6 Make sure to have the eventbus deployed in the namespace. Make sure your AWS account has permissions to execute Lambda. More info on AWS permissions is available here . Fetch the access and secret key for your AWS account and base64 encode them. Create a secret called aws-secret as follows. apiVersion : v1 kind : Secret metadata : name : aws - secret type : Opaque data : accesskey : < base64 - access - key > secretkey : < base64 - secret - key > Create a basic lambda function called hello using either the AWS CLI or the console. exports . handler = async ( event , context ) => { console . log ( 'name =' , event . name ); return event . name ; }; Let's set up a webhook event-source to invoke the lambda over HTTP requests. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Let's expose the webhook event-source using port-forward so that we can make a request to it. kubectl -n argo-events port-forward 12000:12000 Deploy the webhook sensor with the AWS Lambda trigger. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/aws-lambda-trigger.yaml Once the sensor pod is in a running state, make a curl request to the webhook event-source pod, curl -d '{\"name\":\"foo\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example It will trigger the AWS Lambda function hello . Look at the CloudWatch logs to verify. Specification \u00b6 The AWS Lambda trigger specification is available here . Request Payload \u00b6 Invoking the AWS Lambda without a request payload would not be very useful. The lambda trigger within a sensor is invoked when the sensor receives an event from the eventbus. In order to construct a request payload based on the event data, the sensor offers a payload field as a part of the lambda trigger. Let's examine a lambda trigger, awsLambda : functionName : hello accessKey : name : aws - secret key : accesskey secretKey : name : aws - secret key : secretkey namespace : argo - events region : us - east - 1 payload : - src : dependencyName : test - dep dataKey : body . name dest : name The payload contains a list of src , which refers to the source event, and dest , which refers to the destination key within the resulting request payload. The payload declared above will generate a request payload like below, { \"name\": \"foo\" // name field from event data } The above payload will be passed in the request to invoke the AWS lambda. You can add any number of src and dest entries under payload . Note : Take a look at Parameterization in order to understand how to extract a particular key-value from event data. Parameterization \u00b6 Similar to other types of triggers, the sensor offers parameterization for the AWS Lambda trigger. Parameterization is especially useful when you want to define a generic trigger template in the sensor and populate values like the function name and payload values on the fly. Consider a scenario where you don't want to hard-code the function name and instead want the event data to populate it. awsLambda : functionName : hello // this will be replaced . accessKey : name : aws - secret key : accesskey secretKey : name : aws - secret key : secretkey namespace : argo - events region : us - east - 1 payload : - src : dependencyName : test - dep dataKey : body . message dest : message parameters : - src : dependencyName : test - dep dataKey : body . 
function_name dest : functionName With parameters the sensor will replace the function name hello with the value of the field function_name from the event data. You can learn more about trigger parameterization here . Policy \u00b6 Trigger policy helps you determine the status of the lambda invocation and decide whether to stop or continue the sensor. To determine whether the lambda was successful or not, the Lambda trigger provides a Status policy. The Status holds a list of response statuses that are considered valid. awsLambda : functionName : hello accessKey : name : aws - secret key : accesskey secretKey : name : aws - secret key : secretkey namespace : argo - events region : us - east - 1 payload : - src : dependencyName : test - dep dataKey : body . message dest : message policy : status : allow : - 200 - 201 The above lambda trigger will be treated as successful only if its invocation returns with either a 200 or 201 status.","title":"AWS Lambda"},
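A note on credentials: the accessKey and secretKey selectors shown above are only one way to authenticate. A hedged sketch follows, assuming the sensor pod already carries AWS credentials through its environment or an IAM role for its service account, in which case the explicit secret selectors can be dropped:

```yaml
awsLambda:
  functionName: hello
  region: us-east-1
  # Assumption: with ambient AWS credentials (e.g. IRSA), the accessKey/secretKey
  # secret selectors are omitted and the default credential chain is used instead.
  payload:
    - src:
        dependencyName: test-dep
        dataKey: body.name
      dest: name
```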
{"location":"sensors/triggers/azure-event-hubs/","text":"Azure Event Hubs \u00b6 Azure Event Hubs Trigger allows a sensor to publish events to Azure Event Hubs . Argo Events integrates with Azure Event Hubs to stream data from an EventSource. NOTE: Parameterization for the fqdn and hubName values is not yet supported. Specification \u00b6 The Azure Event Hubs trigger specification is available here . Send an Event to Azure Event Hubs \u00b6 Make sure to have the eventbus deployed in the namespace. Create an event hub . Make sure that the Shared Access Key used to connect to Azure Event Hubs has the Send policy. Get the Primary Key of the Shared Access Policy, the Name of the Shared Access Policy, the Hub Name , and the FQDN of the Azure Event Hubs Namespace. 
Create a secret called azure-event-hubs-secret as follows: NOTE: sharedAccessKey refers to the Primary Key and sharedAccessKeyName refers to the Name of the Shared Access Policy. apiVersion : v1 kind : Secret metadata : name : azure - event - hubs - secret type : Opaque data : sharedAccessKey : < base64 - shared - access - key > sharedAccessKeyName : < base64 - shared - access - key - name > Let's set up a webhook event-source to process incoming requests. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create the sensor with the following template. Replace the necessary values for fqdn and hubName : apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : azure - events - hub spec : dependencies : - name : test - dep eventSourceName : webhook eventName : example triggers : - template : name : azure - eventhubs - trigger azureEventHubs : # FQDN of the EventsHub namespace you created # More info at https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string fqdn : eventhubs_fqdn sharedAccessKeyName : name : azure - event - hubs - secret key : sharedAccessKeyName sharedAccessKey : name : azure - event - hubs - secret key : sharedAccessKey # Event Hub path/name hubName : hub_name payload : - src : dependencyName : test - dep dataKey : body . message dest : message The event needs a body. In order to construct a message based on your event data, the Azure Event Hubs sensor has the payload field as part of the trigger. The payload contains a list of src , which refers to the source events, and dest , which refers to the destination key within the resulting request payload. The payload declared above will generate a message body like below, { \"message\": \"some message here\" // name/key of the object } Let's expose the webhook event-source pod using port-forward so that we can make a request to it. kubectl -n argo-events port-forward 12000:12000 Use either curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Verify events have been ingested in Azure Event Hubs by creating a listener app or following other code samples . You can optionally create an Azure Event Hubs Event Source .","title":"Azure Event Hubs"},
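A small aside on the secret above: if you prefer not to base64 encode the values by hand, Kubernetes will do it for you when you use stringData instead of data. A sketch, with placeholder values for you to fill in:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: azure-event-hubs-secret
  namespace: argo-events
type: Opaque
stringData: # stringData values are plain text; the API server base64 encodes them
  sharedAccessKey: <primary-key>
  sharedAccessKeyName: <policy-name>
```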
{"location":"sensors/triggers/azure-service-bus/","text":"Azure Service Bus \u00b6 The Service Bus Trigger allows a sensor to send messages to Azure Service Bus queues and topics. Specification \u00b6 The Azure Service Bus trigger specification is available here . Setup \u00b6 Create a queue called test using either the Azure CLI or the Azure Service Bus management console. Fetch your connection string for Azure Service Bus and base64 encode it. Create a secret called azure-secret as follows. apiVersion : v1 kind : Secret metadata : name : azure - secret type : Opaque data : connectionstring : < base64 - connection - string > Deploy the secret. kubectl -n argo-events apply -f azure-secret.yaml Let's set up a webhook event-source to process incoming requests. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create a sensor by running the following command. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/azure-service-bus-sensor.yaml The Service Bus message needs a body. In order to construct a message based on your event data, the Azure Service Bus sensor has the payload field as part of the trigger. 
The payload declared above will generate a message body like below, { \"message\": \"some message here\" // name/key of the object } Let's expose the webhook event-source pod using port-forward so that we can make a request to it. kubectl -n argo-events port-forward 12000:12000 Use either curl or Postman to send a POST request to http://localhost:12000/example. curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example","title":"Azure Service Bus"},{"location":"sensors/triggers/build-your-own-trigger/","text":"Build Your Own Trigger \u00b6 Argo Events supports a variety of triggers out of the box, like Argo Workflows, K8s objects, AWS Lambda, HTTP requests, etc., but you may want to write your own logic to trigger a pipeline or create an object in a K8s cluster. An example would be triggering TektonCD or Airflow pipelines on GitHub events. Custom Trigger \u00b6 In order to plug your own trigger implementation into the Argo Events Sensor, you need to run a gRPC server that implements the interface that the sensor expects. Interface \u00b6 The interface is exposed via a proto file, // Trigger offers services to build a custom trigger service Trigger { // FetchResource fetches the resource to be triggered. rpc FetchResource ( FetchResourceRequest ) returns ( FetchResourceResponse ); // Execute executes the requested trigger resource. rpc Execute ( ExecuteRequest ) returns ( ExecuteResponse ); // ApplyPolicy applies policies on the trigger execution result. rpc ApplyPolicy ( ApplyPolicyRequest ) returns ( ApplyPolicyResponse ); } The complete proto file is available here . 
Let's walk through the contract, FetchResource : If the trigger server needs to fetch a resource from external sources like S3, Git or a URL, this is the place to do so. E.g., if the trigger server aims to invoke a TektonCD pipeline and the PipelineRun resource lives in Git, then the trigger server can first fetch it from Git and return it to the sensor. Execute : In this method, the trigger server executes/invokes the trigger. E.g., a TektonCD pipeline resource being created in the K8s cluster. ApplyPolicy : This is where your trigger implementation can check whether the triggered resource transitioned into the success state. Depending on the response from the trigger server, the sensor will either stop processing subsequent triggers, or it will continue to process them. How to define the Custom Trigger in a sensor? \u00b6 Let's look at the following sensor, apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : webhook - sensor spec : dependencies : - name : test - dep eventSourceName : webhook eventName : example triggers : - template : name : webhook - workflow - trigger custom : # the url of the trigger server. serverURL : tekton - trigger . argo - events . svc : 9000 # spec is map of string->string and it is sent over to trigger server. # the spec can be anything you want as per your use-case, just make sure the trigger server understands the spec map. spec : url : \"https://raw.githubusercontent.com/VaibhavPage/tekton-cd-trigger/master/example.yaml\" # These parameters are applied on resource fetched and returned by the trigger server. # e.g. consider a trigger server which invokes TektonCD pipeline runs, then # the trigger server can return a TektonCD PipelineRun resource. # The parameters are then applied on that PipelineRun resource. parameters : - src : dependencyName : test - dep dataKey : body . namespace dest : metadata . namespace # These parameters are applied on entire template body. # So that you can parameterize anything under `custom` key such as `serverURL`, `spec` etc. parameters : - src : dependencyName : test - dep dataKey : body . url dest : custom . spec . url The sensor definition should look familiar to you. The only difference is the custom key under triggers -> template . The specification under the custom key defines the custom trigger. The most important fields are, serverURL : This is the URL of the trigger gRPC server. spec : It is a map of string -> string. The spec can be anything you want as per your use-case. The sensor sends the spec to the trigger server, and it is up to the trigger gRPC server to interpret the spec. parameters : The parameters override the resource that is fetched by the trigger server. Read more info on parameters here . payload : The payload to send to the trigger server. Read more on payload here . The complete spec for the custom trigger is available here . Custom Trigger in Action \u00b6 Refer to a sample trigger server that invokes a TektonCD pipeline on events.","title":"Build Your Own Trigger"},
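One detail the walkthrough leaves implicit: the trigger gRPC server has to be reachable at the serverURL the sensor points to. A minimal sketch of that wiring, assuming the server runs as a pod labeled app: tekton-trigger in the argo-events namespace (the name matches the example above; everything else here is an assumption):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: tekton-trigger   # yields serverURL tekton-trigger.argo-events.svc:9000
  namespace: argo-events
spec:
  selector:
    app: tekton-trigger  # assumed label on the trigger server pod
  ports:
    - port: 9000
      targetPort: 9000
```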
{"location":"sensors/triggers/email-trigger/","text":"Email Trigger \u00b6 The Email trigger is used to send a custom email to a desired set of email addresses using an SMTP server. The intended use is notifications for a build pipeline, but it can be used for any notification scenario. Prerequisite \u00b6 Deploy the eventbus in the namespace. Have an SMTP server set up. Create a Kubernetes secret with the SMTP password in your cluster. kubectl create secret generic smtp-secret --from-literal=password=$SMTP_PASSWORD Note : If your SMTP server does not require authentication, this step can be skipped. Create a webhook event-source. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Set up port-forwarding to expose the HTTP server. We will use port-forwarding here. kubectl port-forward -n argo-events 12000:12000 Email Trigger \u00b6 Let's say we want to send an email to a dynamic recipient using a custom email body template. The custom email body template we are going to use is the following: Hi , Hello There Thanks, Obi where the name has to be substituted with the receiver name from the event. Create a sensor with the Email trigger. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/email-trigger.yaml Note : Please update email.port , email.host and email.username to that of your SMTP server. If your SMTP server does not require authentication, the email.username and email.smtpPassword should be omitted. Send an HTTP request to the event-source pod to fire the Email trigger. curl - d '{\"name\":\"Luke\", \"to\":\"your@email.com\"}' - H \"Content-Type: application/json\" - X POST http : // localhost : 12000 / example Note : You can modify the value for the key \"to\" to send the email to your address. Alternatively, you can skip providing the \"to\" in the payload to send an email to the static email address provided in the trigger. curl -d '{\"name\":\"Luke\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Note : You have to remove the parameterization for email.to.0 and add email.to like so: email : ... to : - target1@email.com - target2@email.com ... Parameterization \u00b6 We can parameterize the to, from, subject and body of the email trigger for dynamic capabilities. 
The email trigger parameters have the following structure, - parameters: - src: dependencyName: test-dep dataKey: body.to dest: email.to.0 - src: dependencyName: test-dep dataKey: body.to dest: email.to.-1 - src: dependencyName: test-dep dataKey: body.from dest: email.from - src: dependencyName: test-dep dataKey: body.subject dest: email.subject - src: dependencyName: test-dep dataKey: body.emailBody dest: email.body email.to.index can be used to overwrite an email address already specified in the trigger at the provided index (where index is an integer). email.to.-1 can be used to append a new email address to the addresses to which an email will be sent. email.from can be used to specify the from address of the email sent. email.body can be used to specify the body of the email which will be sent. email.subject can be used to specify the subject of the email which will be sent. To understand more about parameterization, take a look at this tutorial . The complete specification of the Email trigger is available here .","title":"Email Trigger"},
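Putting the pieces together, the trigger side of such a sensor looks roughly like the sketch below. This is an illustrative outline, not the linked example file; the SMTP host, addresses and dependency name are placeholders.

```yaml
triggers:
  - template:
      name: email-trigger
      email:
        host: smtp.example.com      # placeholder SMTP host
        port: 587
        username: sender@example.com
        smtpPassword:
          name: smtp-secret         # the secret created in the prerequisites
          key: password
        from: sender@example.com
        subject: Hello there
        body: placeholder           # overwritten below via email.body
        to:
          - target1@email.com
    parameters:
      - src:
          dependencyName: test-dep
          dataKey: body.emailBody
        dest: email.body
      - src:
          dependencyName: test-dep
          dataKey: body.to
        dest: email.to.-1           # append the recipient taken from the event
```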
{"location":"sensors/triggers/http-trigger/","text":"HTTP Trigger \u00b6 Argo Events offers an HTTP trigger which can easily invoke serverless functions like OpenFaaS, Kubeless, Knative, and Nuclio, and make REST API calls. Specification \u00b6 The HTTP trigger specification is available here . REST API Calls \u00b6 Consider a scenario where your REST API server needs to consume events from event-sources like S3, GitHub, SQS, etc. Usually, you'd end up writing the integration yourself in the server code, although the server logic has nothing to do with any of the event-sources. This is where the Argo Events HTTP trigger can help. The HTTP trigger takes the task of consuming events from event-sources away from the API server and seamlessly integrates these events via REST API calls. We will set up a basic Go HTTP server and connect it with the Minio events. The HTTP server simply prints the request body as follows. package main import ( \"fmt\" \"io\" \"net/http\" ) func hello ( w http . ResponseWriter , req * http . Request ) { body , err := io . ReadAll ( req . Body ) if err != nil { fmt . Printf ( \"%+v \\n \" , err ) return } fmt . Println ( string ( body )) fmt . Fprintf ( w , \"hello \\n \" ) } func main () { http . HandleFunc ( \"/hello\" , hello ) fmt . Println ( \"server is listening on 8090\" ) http . ListenAndServe ( \":8090\" , nil ) } Deploy the HTTP server. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/09-http-trigger/http-server.yaml Create a service to expose the HTTP server. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/09-http-trigger/http-server-svc.yaml Either use an Ingress, an OpenShift Route, or port-forwarding to expose the HTTP server. kubectl -n argo-events port-forward 8090:8090 Our goal is to seamlessly integrate Minio S3 bucket notifications with the REST API server created in the previous step. So, let's set up the Minio event-source available here . Don't create the sensor, as we will be deploying it in the next step. Create a sensor as follows. 
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/http-trigger.yaml Now, drop a file onto the input bucket in the Minio server. The sensor has triggered an HTTP request to the HTTP server. Take a look at the logs. server is listening on 8090 { \"type\" : \"minio\" , \"bucket\" : \"input\" } Great!!! Request Payload \u00b6 In order to construct a request payload based on the event data, the sensor offers a payload field as a part of the HTTP trigger. Let's examine an HTTP trigger, http : url : http : // http - server . argo - events . svc : 8090 / hello payload : - src : dependencyName : test - dep dataKey : notification . 0. s3 . bucket . name dest : bucket - src : dependencyName : test - dep contextKey : type dest : type method : POST // GET , DELETE , POST , PUT , HEAD , etc . The payload contains a list of src , which refers to the source event, and dest , which refers to the destination key within the resulting request payload. The payload declared above will generate a request payload like below, { \"type\" : \"type of event from event's context\" \"bucket\" : \"bucket name from event data\" } The above payload will be passed in the HTTP request. You can add any number of src and dest entries under payload . Note : Take a look at Parameterization in order to understand how to extract a particular key-value from event data. Parameterization \u00b6 Similar to other types of triggers, the sensor offers parameterization for the HTTP trigger. Parameterization is especially useful when you want to define a generic trigger template in the sensor and populate values like the URL and payload values on the fly. You can learn more about trigger parameterization here . Policy \u00b6 Trigger policy helps you determine the status of the HTTP request and decide whether to stop or continue the sensor. To determine whether the HTTP request was successful or not, the HTTP trigger provides a Status policy. The Status holds a list of response statuses that are considered valid. http : url : http : // http - server . argo - events . svc : 8090 / hello payload : - src : dependencyName : test - dep dataKey : notification . 0. s3 . bucket . name dest : bucket - src : dependencyName : test - dep contextKey : type dest : type method : POST // GET , DELETE , POST , PUT , HEAD , etc . retryStrategy : steps : 3 duration : 3 s policy : status : allow : - 200 - 201 The above HTTP trigger will be treated as successful only if the HTTP request returns with either a 200 or 201 status. OpenFaaS \u00b6 OpenFaaS offers a simple way to spin up serverless functions. Let's see how we can leverage the Argo Events HTTP trigger to invoke an OpenFaaS function. If you don't have OpenFaaS installed, follow the instructions . Let's create a basic function. You can follow the steps to set up the function. package function import ( \"fmt\" ) // Handle a serverless request func Handle ( req [] byte ) string { return fmt . Sprintf ( \"Hello, Go. You said: %s \" , string ( req )) } Make sure the function pod is up and running. We are going to invoke the OpenFaaS function on a message on the Redis subscriber. Let's set up the Redis database and Redis PubSub event-source as specified here . Do not create the Redis sensor; we are going to create it in the next step. Let's create the sensor with the OpenFaaS trigger. apiVersion : argoproj . 
io / v1alpha1 kind : Sensor metadata : name : redis - sensor spec : dependencies : - name : test - dep eventSourceName : redis eventName : example triggers : - template : name : openfaas - trigger http : url : http : // gateway . openfaas . svc . cluster . local : 8080 / function / gohash payload : - src : dependencyName : test - dep dest : bucket method : POST Publish a message on the FOO channel using redis-cli . PUBLISH FOO hello As soon as you publish the message, the sensor will invoke the OpenFaaS function gohash . Kubeless \u00b6 Similar to REST API calls, you can easily invoke Kubeless functions using the HTTP trigger. If you don't have Kubeless installed, follow the installation . Let's create a basic function. def hello ( event , context ) : print event return event [ 'data' ] Make sure the function pod and service are created. Now, we are going to invoke the Kubeless function when a message is placed on a NATS queue. Let's set up the NATS event-source. Follow the instructions for details. Do not create the NATS sensor; we are going to create it in the next step. Let's create the NATS sensor with an HTTP trigger. apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : nats - sensor spec : dependencies : - name : test - dep eventSourceName : nats eventName : example triggers : - template : name : kubeless - trigger http : serverURL : http : // hello . kubeless . svc . cluster . local : 8080 payload : - src : dependencyName : test - dep dataKey : body . first_name dest : first_name - src : dependencyName : test - dep dataKey : body . last_name dest : last_name method : POST Once the event-source and sensor pods are up and running, dispatch a message on the foo subject using the nats client. go run main . go - s localhost foo '{\"first_name\": \"foo\", \"last_name\": \"bar\"}' It will invoke the Kubeless function hello . { ' event - time ' : None , ' extensions ' : { ' request ' : < LocalRequest : POST http : //hello.kubeless.svc.cluster.local:8080/> }, 'event-type': None, 'event-namespace': None, 'data': '{\"first_name\":\"foo\",\"last_name\":\"bar\"}', 'event-id': None} Other serverless frameworks \u00b6 Similar to the OpenFaaS and Kubeless invocations demonstrated above, you can easily trigger Knative, Nuclio, and Fission functions using the HTTP trigger.","title":"HTTP Trigger"},
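Two HTTP trigger details the examples above do not show are static headers and authentication. The sketch below is a hedged outline based on the headers and basicAuth fields of the trigger spec; the http-creds secret and its keys are hypothetical.

```yaml
http:
  url: http://http-server.argo-events.svc:8090/hello
  method: POST
  headers:
    Content-Type: application/json
  basicAuth:
    username:
      name: http-creds   # hypothetical secret holding the credentials
      key: username
    password:
      name: http-creds
      key: password
  payload:
    - src:
        dependencyName: test-dep
        dataKey: body.message
      dest: message
```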
{"location":"sensors/triggers/k8s-object-trigger/","text":"Kubernetes Object Trigger \u00b6 Apart from Argo workflow objects, the sensor lets you trigger any Kubernetes object, including Custom Resources, such as Pod, Deployment, Job, CronJob, etc. Having the ability to trigger Kubernetes objects is quite powerful, as it provides an avenue to set up event-driven pipelines for existing workloads. Trigger a K8s Pod \u00b6 We will use a webhook event-source and sensor to trigger a K8s pod. Let's set up a webhook event-source to process incoming requests. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml To trigger a pod, we need to create a sensor as defined below. apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : webhook spec : template : serviceAccountName : create - pod - sa # A service account has privileges to create a Pod dependencies : - name : test - dep eventSourceName : webhook eventName : example triggers : - template : name : webhook - pod - trigger k8s : operation : create source : resource : apiVersion : v1 kind : Pod metadata : generateName : hello - world - spec : containers : - name : hello - container args : - \"hello-world\" command : - cowsay image : \"docker/whalesay:latest\" parameters : - src : dependencyName : test - dep dest : spec . containers . 0 . args . 0 Create the sensor. kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/trigger-standard-k8s-resource.yaml Let's expose the webhook event-source pod using port-forward so that we can make a request to it. kubectl -n argo-events port-forward 12000:12000 Use either curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example After the pod has completed, inspect the logs of the pod; you will see something similar to the below. _________________________________________ / { \"context\" :{ \"type\" : \"webhook\" , \"specVersi \\ | on\" : \"0.3\" , \"source\" : \"webhook\" , \"e | | ventID\" : \"30306463666539362d346666642d34 | | 3336332d383861312d336538363333613564313 | | 932\" , \"time\" : \"2020-01-11T21:23:07.682961 | | Z\" , \"dataContentType\" : \"application/json\" | | , \"subject\" : \"example\" }, \"data\" : \"eyJoZWFkZ | | XIiOnsiQWNjZXB0IjpbIiovKiJdLCJDb250ZW50 | | LUxlbmd0aCI6WyIxOSJdLCJDb250ZW50LVR5cGU | | iOlsiYXBwbGljYXRpb24vanNvbiJdLCJVc2VyLU | | FnZW50IjpbImN1cmwvNy41NC4wIl19LCJib2R5I | \\ jp7Im1lc3NhZ2UiOiJoZXkhISJ9fQ==\" } / ----------------------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## == = / \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" ___ / == = ~~~ { ~~ ~~~~ ~~~ ~~~~ ~~ ~ / == = - ~~~ \\ ______ o __ / \\ \\ __ / \\ ____ \\ ______ / Operation \u00b6 You can specify the operation for the trigger using the operation key under triggers->template->k8s. The operation can be one of the following. 
create : Creates the object if not available in the K8s cluster. update : Updates the object. patch : Patches the object using the given patch strategy. delete : Deletes the object if it exists. More info is available here . Parameterization \u00b6 Similar to other types of triggers, the sensor offers parameterization for the K8s trigger. Parameterization is especially useful when you want to define a generic trigger template in the sensor and populate the K8s object values on the fly. You can learn more about trigger parameterization here . Policy \u00b6 Trigger policy helps you determine the status of the triggered K8s object and decide whether to stop or continue the sensor. To determine whether the K8s object was successful or not, the K8s trigger provides a Resource Labels policy. The Resource Labels policy holds a list of labels which are checked against the triggered K8s object to determine the status of the object. # Policy to configure backoff and execution criteria for the trigger # Because the sensor is able to trigger any K8s resource , it determines the resource state by looking at the resource 's labels. policy: k8s: # Backoff before checking the resource labels backoff: # Duration is the duration in nanoseconds duration: 1000000000 # 1 second # Duration is multiplied by factor each iteration factor: 2 # The amount of jitter applied each iteration jitter: 0.1 # Exit with error after these many steps steps: 5 # labels set on the resource decide if the resource has transitioned into the success state. labels: workflows.argoproj.io/phase: Succeeded # Determines whether trigger should be marked as failed if the backoff times out and sensor is still unable to decide the state of the trigger. # defaults to false errorOnBackoffTimeout: true A complete example is available here .","title":"Kubernetes Object Trigger"},
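For the patch operation specifically, here is a hedged sketch of what a trigger might look like. The patchStrategy value shown and the my-app Deployment are assumptions for illustration; the idea is to stamp an annotation with the event time so the Deployment rolls.

```yaml
k8s:
  operation: patch
  # Assumption: patchStrategy selects the patch content type; merge patch shown here.
  patchStrategy: application/merge-patch+json
  source:
    resource:
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: my-app   # hypothetical existing Deployment to patch
      spec:
        template:
          metadata:
            annotations:
              refreshed-at: placeholder
  parameters:
    - src:
        dependencyName: test-dep
        contextKey: time
      dest: spec.template.metadata.annotations.refreshed-at
```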
kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/trigger-standard-k8s-resource.yaml Lets expose the webhook event-source pod using port-forward so that we can make a request to it. kubectl -n argo-events port-forward 12000:12000 Use either Curl or Postman to send a post request to the http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example After the pod was completed, inspect the logs of the pod, you will see something similar as below. _________________________________________ / { \"context\" :{ \"type\" : \"webhook\" , \"specVersi \\ | on\" : \"0.3\" , \"source\" : \"webhook\" , \"e | | ventID\" : \"30306463666539362d346666642d34 | | 3336332d383861312d336538363333613564313 | | 932\" , \"time\" : \"2020-01-11T21:23:07.682961 | | Z\" , \"dataContentType\" : \"application/json\" | | , \"subject\" : \"example\" }, \"data\" : \"eyJoZWFkZ | | XIiOnsiQWNjZXB0IjpbIiovKiJdLCJDb250ZW50 | | LUxlbmd0aCI6WyIxOSJdLCJDb250ZW50LVR5cGU | | iOlsiYXBwbGljYXRpb24vanNvbiJdLCJVc2VyLU | | FnZW50IjpbImN1cmwvNy41NC4wIl19LCJib2R5I | \\ jp7Im1lc3NhZ2UiOiJoZXkhISJ9fQ==\" } / ----------------------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## == = / \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" ___ / == = ~~~ { ~~ ~~~~ ~~~ ~~~~ ~~ ~ / == = - ~~~ \\ ______ o __ / \\ \\ __ / \\ ____ \\ ______ /","title":"Trigger a K8s Pod"},{"location":"sensors/triggers/k8s-object-trigger/#operation","text":"You can specify the operation for the trigger using the operation key under triggers->template->k8s. Operation can be either. create : Creates the object if not available in K8s cluster. update : Updates the object. patch : Patches the object using given patch strategy. delete : Deletes the object if it exists. More info available at here .","title":"Operation"},{"location":"sensors/triggers/k8s-object-trigger/#parameterization","text":"Similar to other type of triggers, sensor offers parameterization for the K8s trigger. Parameterization is specially useful when you want to define a generic trigger template in the sensor and populate the K8s object values on the fly. You can learn more about trigger parameterization here .","title":"Parameterization"},{"location":"sensors/triggers/k8s-object-trigger/#policy","text":"Trigger policy helps you determine the status of the triggered K8s object and decide whether to stop or continue sensor. To determine whether the K8s object was successful or not, the K8s trigger provides a Resource Labels policy. The Resource Labels holds a list of labels which are checked against the triggered K8s object to determine the status of the object. # Policy to configure backoff and execution criteria for the trigger # Because the sensor is able to trigger any K8s resource , it determines the resource state by looking at the resource 's labels. policy: k8s: # Backoff before checking the resource labels backoff: # Duration is the duration in nanoseconds duration: 1000000000 # 1 second # Duration is multiplied by factor each iteration factor: 2 # The amount of jitter applied each iteration jitter: 0.1 # Exit with error after these many steps steps: 5 # labels set on the resource decide if the resource has transitioned into the success state. labels: workflows.argoproj.io/phase: Succeeded # Determines whether trigger should be marked as failed if the backoff times out and sensor is still unable to decide the state of the trigger. 
# defaults to false errorOnBackoffTimeout: true A complete example is available here .","title":"Policy"},{"location":"sensors/triggers/kafka-trigger/","text":"Kafka Trigger \u00b6 The Kafka trigger allows a sensor to publish events on a Kafka topic. This trigger helps source events from the outside world into your messaging queues. Specification \u00b6 The Kafka trigger specification is available here . Walkthrough \u00b6 Consider a scenario where you are expecting a file drop onto a Minio bucket and want to place that event on a Kafka topic. Set up the Minio Event Source here . Do not create the Minio sensor; we are going to create it in the next step. Let's create the sensor. apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : minio - sensor spec : dependencies : - name : test - dep eventSourceName : minio eventName : example triggers : - template : name : kafka - trigger kafka : # Kafka URL url : kafka . argo - events . svc : 9092 # Name of the topic topic : minio - events # partition id partition : 0 payload : - src : dependencyName : test - dep dataKey : notification . 0. s3 . object . key dest : fileName - src : dependencyName : test - dep dataKey : notification . 0. s3 . bucket . name dest : bucket The Kafka message needs a body. In order to construct a message based on the event data, the sensor offers a payload field as a part of the Kafka trigger. The payload contains a list of src entries, which refer to the source event, and dest entries, which refer to the destination key within the resulting request payload. The payload declared above will generate a message body like the one below. { \"fileName\": \"hello.txt\" // name/key of the object \"bucket\": \"input\" // name of the bucket } Drop a file called hello.txt onto the bucket input and you will receive the message on the Kafka topic.","title":"Kafka Trigger"},{"location":"sensors/triggers/kafka-trigger/#kafka-trigger","text":"The Kafka trigger allows a sensor to publish events on a Kafka topic. This trigger helps source events from the outside world into your messaging queues.","title":"Kafka Trigger"},{"location":"sensors/triggers/kafka-trigger/#specification","text":"The Kafka trigger specification is available here .","title":"Specification"},{"location":"sensors/triggers/kafka-trigger/#walkthrough","text":"Consider a scenario where you are expecting a file drop onto a Minio bucket and want to place that event on a Kafka topic. Set up the Minio Event Source here . Do not create the Minio sensor; we are going to create it in the next step. Let's create the sensor. apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : minio - sensor spec : dependencies : - name : test - dep eventSourceName : minio eventName : example triggers : - template : name : kafka - trigger kafka : # Kafka URL url : kafka . argo - events . svc : 9092 # Name of the topic topic : minio - events # partition id partition : 0 payload : - src : dependencyName : test - dep dataKey : notification . 0. s3 . object . key dest : fileName - src : dependencyName : test - dep dataKey : notification . 0. s3 . bucket . name dest : bucket The Kafka message needs a body. In order to construct a message based on the event data, the sensor offers a payload field as a part of the Kafka trigger. The payload contains a list of src entries, which refer to the source event, and dest entries, which refer to the destination key within the resulting request payload. The payload declared above will generate a message body like the one below. { \"fileName\": \"hello.txt\" // name/key of the object \"bucket\": \"input\" // name of the bucket } Drop a file called hello.txt onto the bucket input and you will receive the message on the Kafka topic.","title":"Walkthrough"},
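Beyond url, topic, and partition, the Kafka trigger specification linked above exposes additional delivery-tuning fields. A minimal sketch under the assumption that the defaults are not suitable; the field names follow the trigger spec, but the values are illustrative and should be verified against the specification:

kafka:
  url: kafka.argo-events.svc:9092
  topic: minio-events
  partition: 0
  requiredAcks: 1       # acks the producer waits for before a send counts as complete
  compress: true        # compress outgoing messages
  flushFrequency: 500   # producer batch flush frequency, in milliseconds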
{"location":"sensors/triggers/log/","text":"Log \u00b6 The Log trigger is for debugging - it simply logs the events it receives as JSON: { \"level\" : \"info\" , \"ts\" : 1604783266.973979 , \"logger\" : \"argo-events.sensor\" , \"caller\" : \"log/log.go:35\" , \"msg\" : \"{\\\"eventTime\\\":\\\"2020-11-07 21:07:46.9658533 +0000 UTC m=+20468.986115001\\\"}\" , \"sensorName\" : \"log\" , \"triggerName\" : \"log-trigger\" , \"dependencyName\" : \"test-dep\" , \"eventContext\" : \"{\\\"id\\\":\\\"37363664356662642d616364322d343563332d396362622d353037653361343637393237\\\",\\\"source\\\":\\\"calendar\\\",\\\"specversion\\\":\\\"1.0\\\",\\\"type\\\":\\\"calendar\\\",\\\"datacontenttype\\\":\\\"application/json\\\",\\\"subject\\\":\\\"example-with-interval\\\",\\\"time\\\":\\\"2020-11-07T21:07:46Z\\\"}\" } Specification \u00b6 The specification is available here . Parameterization \u00b6 No parameterization is supported.","title":"Log"},{"location":"sensors/triggers/log/#log","text":"The Log trigger is for debugging - it simply logs the events it receives as JSON: { \"level\" : \"info\" , \"ts\" : 1604783266.973979 , \"logger\" : \"argo-events.sensor\" , \"caller\" : \"log/log.go:35\" , \"msg\" : \"{\\\"eventTime\\\":\\\"2020-11-07 21:07:46.9658533 +0000 UTC m=+20468.986115001\\\"}\" , \"sensorName\" : \"log\" , \"triggerName\" : \"log-trigger\" , \"dependencyName\" : \"test-dep\" , \"eventContext\" : \"{\\\"id\\\":\\\"37363664356662642d616364322d343563332d396362622d353037653361343637393237\\\",\\\"source\\\":\\\"calendar\\\",\\\"specversion\\\":\\\"1.0\\\",\\\"type\\\":\\\"calendar\\\",\\\"datacontenttype\\\":\\\"application/json\\\",\\\"subject\\\":\\\"example-with-interval\\\",\\\"time\\\":\\\"2020-11-07T21:07:46Z\\\"}\" }","title":"Log"},{"location":"sensors/triggers/log/#specification","text":"The specification is available here .","title":"Specification"},{"location":"sensors/triggers/log/#parameterization","text":"No parameterization is supported.","title":"Parameterization"},
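Since the page above only shows the trigger's output, here is a minimal sketch of the corresponding trigger template; intervalSeconds is part of the log trigger spec linked above and throttles repeated log lines, though the value here is an illustrative assumption:

triggers:
  - template:
      name: log-trigger
      log:
        intervalSeconds: 10   # log at most one message per dependency every 10 seconds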
{"location":"sensors/triggers/nats-trigger/","text":"NATS Trigger \u00b6 The NATS trigger allows a sensor to publish events on NATS subjects. This trigger helps source events from the outside world into your messaging queues. Specification \u00b6 The NATS trigger specification is available here . Walkthrough \u00b6 Consider a scenario where you are expecting a file drop onto a Minio bucket and want to place that event on a NATS subject. Set up the Minio Event Source here . Do not create the Minio sensor; we are going to create it in the next step. Let's create the sensor. apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : minio - sensor spec : dependencies : - name : test - dep eventSourceName : minio eventName : example triggers : - template : name : nats - trigger nats : # NATS Server URL url : nats . argo - events . svc : 4222 # Name of the subject subject : minio - events payload : - src : dependencyName : test - dep dataKey : notification . 0. s3 . object . key dest : fileName - src : dependencyName : test - dep dataKey : notification . 0. s3 . bucket . name dest : bucket The NATS message needs a body. In order to construct a message based on the event data, the sensor offers a payload field as a part of the NATS trigger. The payload contains a list of src entries, which refer to the source event, and dest entries, which refer to the destination key within the resulting request payload. The payload declared above will generate a message body like the one below. { \"fileName\": \"hello.txt\" // name/key of the object \"bucket\": \"input\" // name of the bucket } If you are running NATS on a local K8s cluster, make sure to port-forward to the pod. kubectl -n argo-events port-forward 4222:4222 Subscribe to the subject called minio-events . Refer to the NATS publish-subscribe example to publish a message to the subject: https://github.com/nats-io/go-nats-examples/tree/master/patterns/publish-subscribe . go run main . go - s localhost minio - events Drop a file called hello.txt onto the bucket input and you will receive the message on the NATS subscriber as follows. [#1] Received on [minio-events]: '{\"bucket\":\"input\",\"fileName\":\"hello.txt\"}'","title":"NATS Trigger"},{"location":"sensors/triggers/nats-trigger/#nats-trigger","text":"The NATS trigger allows a sensor to publish events on NATS subjects. This trigger helps source events from the outside world into your messaging queues.","title":"NATS Trigger"},{"location":"sensors/triggers/nats-trigger/#specification","text":"The NATS trigger specification is available here .","title":"Specification"},{"location":"sensors/triggers/nats-trigger/#walkthrough","text":"Consider a scenario where you are expecting a file drop onto a Minio bucket and want to place that event on a NATS subject. Set up the Minio Event Source here . Do not create the Minio sensor; we are going to create it in the next step. Let's create the sensor. apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : minio - sensor spec : dependencies : - name : test - dep eventSourceName : minio eventName : example triggers : - template : name : nats - trigger nats : # NATS Server URL url : nats . argo - events . svc : 4222 # Name of the subject subject : minio - events payload : - src : dependencyName : test - dep dataKey : notification . 0. s3 . object . key dest : fileName - src : dependencyName : test - dep dataKey : notification . 0. s3 . bucket . name dest : bucket The NATS message needs a body. In order to construct a message based on the event data, the sensor offers a payload field as a part of the NATS trigger. The payload contains a list of src entries, which refer to the source event, and dest entries, which refer to the destination key within the resulting request payload. The payload declared above will generate a message body like the one below. { \"fileName\": \"hello.txt\" // name/key of the object \"bucket\": \"input\" // name of the bucket } If you are running NATS on a local K8s cluster, make sure to port-forward to the pod. kubectl -n argo-events port-forward 4222:4222 Subscribe to the subject called minio-events . Refer to the NATS publish-subscribe example to publish a message to the subject: https://github.com/nats-io/go-nats-examples/tree/master/patterns/publish-subscribe . go run main . go - s localhost minio - events Drop a file called hello.txt onto the bucket input and you will receive the message on the NATS subscriber as follows. [#1] Received on [minio-events]: '{\"bucket\":\"input\",\"fileName\":\"hello.txt\"}'","title":"Walkthrough"},{"location":"sensors/triggers/openwhisk-trigger/","text":"OpenWhisk Trigger \u00b6 OpenWhisk is a framework to run serverless workloads. It ships with its own event sources, but they are limited in number, and it lacks support for the circuits, parameterization, filtering, on-demand payload construction, etc. that a sensor provides.
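While the setup walkthrough below is still pending, the sensor API already defines an openWhisk trigger. Purely as a rough, hypothetical sketch (every value is a placeholder, and the field names should be double-checked against the OpenWhisk trigger specification before use):

openWhisk:
  host: openwhisk.example.com    # placeholder API host
  namespace: guest               # placeholder OpenWhisk namespace
  actionName: hello              # placeholder action to invoke
  authToken:                     # assumed secret reference holding API credentials
    name: openwhisk-secret       # hypothetical secret
    key: token
  payload:
    - src:
        dependencyName: test-dep
        dataKey: body.message
      dest: message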
Prerequisite \u00b6 OpenWhisk must be up and running. Setup \u00b6 Coming Soon...","title":"OpenWhisk Trigger"},{"location":"sensors/triggers/openwhisk-trigger/#openwhisk-trigger","text":"OpenWhisk is a framework to run serverless workloads. It ships with its own event sources, but they are limited in number, and it lacks support for the circuits, parameterization, filtering, on-demand payload construction, etc. that a sensor provides.","title":"OpenWhisk Trigger"},{"location":"sensors/triggers/openwhisk-trigger/#prerequisite","text":"OpenWhisk must be up and running.","title":"Prerequisite"},{"location":"sensors/triggers/openwhisk-trigger/#setup","text":"Coming Soon...","title":"Setup"},{"location":"sensors/triggers/pulsar-trigger/","text":"Pulsar Trigger \u00b6 The Pulsar trigger allows a sensor to publish events on a Pulsar topic. This trigger helps source events from the outside world into your messaging queues. Specification \u00b6 The Pulsar trigger specification is available here . Walkthrough \u00b6 Consider a scenario where you are expecting a file drop onto a Minio bucket and want to place that event on a Pulsar topic. Set up the Minio Event Source here . Do not create the Minio sensor; we are going to create it in the next step. Let's create the sensor. apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : minio - sensor spec : dependencies : - name : test - dep eventSourceName : minio eventName : example triggers : - template : name : pulsar - trigger pulsar : # Pulsar URL url : pulsar : // pulsar . argo - events . svc : 6650 # Name of the topic topic : minio - events payload : - src : dependencyName : test - dep dataKey : notification . 0. s3 . object . key dest : fileName - src : dependencyName : test - dep dataKey : notification . 0. s3 . bucket . name dest : bucket The Pulsar message needs a body. In order to construct a message based on the event data, the sensor offers a payload field as a part of the Pulsar trigger. The payload contains a list of src entries, which refer to the source event, and dest entries, which refer to the destination key within the resulting request payload. The payload declared above will generate a message body like the one below. { \"fileName\": \"hello.txt\" // name/key of the object \"bucket\": \"input\" // name of the bucket } Drop a file called hello.txt onto the bucket input and you will receive the message on the Pulsar topic.","title":"Pulsar Trigger"},{"location":"sensors/triggers/pulsar-trigger/#pulsar-trigger","text":"The Pulsar trigger allows a sensor to publish events on a Pulsar topic. This trigger helps source events from the outside world into your messaging queues.","title":"Pulsar Trigger"},{"location":"sensors/triggers/pulsar-trigger/#specification","text":"The Pulsar trigger specification is available here .","title":"Specification"},{"location":"sensors/triggers/pulsar-trigger/#walkthrough","text":"Consider a scenario where you are expecting a file drop onto a Minio bucket and want to place that event on a Pulsar topic. Set up the Minio Event Source here . Do not create the Minio sensor; we are going to create it in the next step. Let's create the sensor.
apiVersion : argoproj . io / v1alpha1 kind : Sensor metadata : name : minio - sensor spec : dependencies : - name : test - dep eventSourceName : minio eventName : example triggers : - template : name : pulsar - trigger pulsar : # Pulsar URL url : pulsar : // pulsar . argo - events . svc : 6650 # Name of the topic topic : minio - events payload : - src : dependencyName : test - dep dataKey : notification . 0. s3 . object . key dest : fileName - src : dependencyName : test - dep dataKey : notification . 0. s3 . bucket . name dest : bucket The Pulsar message needs a body. In order to construct a message based on the event data, the sensor offers a payload field as a part of the Pulsar trigger. The payload contains a list of src entries, which refer to the source event, and dest entries, which refer to the destination key within the resulting request payload. The payload declared above will generate a message body like the one below. { \"fileName\": \"hello.txt\" // name/key of the object \"bucket\": \"input\" // name of the bucket } Drop a file called hello.txt onto the bucket input and you will receive the message on the Pulsar topic.","title":"Walkthrough"},{"location":"sensors/triggers/slack-trigger/","text":"Slack Trigger \u00b6 The Slack trigger is used to send a custom message to a desired Slack channel in a Slack workspace. The intended use is notifications for a build pipeline, but it can be used for any notification scenario. Prerequisite \u00b6 Deploy the eventbus in the namespace. Make sure you have a Slack workspace set up that you wish to send a message to. Create a webhook event-source. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Set up port-forwarding to expose the HTTP server. kubectl port-forward -n argo-events 12000:12000 Create a Slack App \u00b6 We need to create a Slack App that will send messages to your Slack Workspace. We will add OAuth Permissions and add the OAuth token to the k8s cluster via a secret. Create a Slack app by clicking Create New App at the Slack API Page . Name your app and choose your intended Slack Workspace. Navigate to your app, then to Features > OAuth & Permissions . Scroll down to Scopes and add the scopes channels:join , channels:read , groups:read and chat:write to the Bot Token Scopes . Scroll to the top of the OAuth & Permissions page and click Install App to Workspace and follow the install Wizard. You should land back on the OAuth & Permissions page. Copy your app's OAuth Access Token. This will allow the trigger to act on behalf of your newly created Slack app. Create a Kubernetes secret with the OAuth token in your cluster. kubectl create secret generic slack-secret --from-literal=token=$SLACK_OAUTH_TOKEN Slack Trigger \u00b6 We will set up a basic Slack trigger that sends a default message, and then a dynamic custom message. Create a sensor with the Slack trigger. We will discuss the trigger details in the following sections. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/slack-trigger.yaml Send an HTTP request to the event-source pod to fire the Slack trigger. curl -d '{\"text\":\"Hello, World!\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Note : The default slack-trigger will send the message \"hello world\" to the #general channel. You may change the default message and channel in slack-trigger.yaml under triggers.slack.channel and triggers.slack.message. Alternatively, you can dynamically determine the channel and message based on parameterization of your event. curl - d '{\"channel\":\"random\",\"message\":\"test message\"}' - H \"Content-Type: application/json\" - X POST http : // localhost : 12000 / example Great! But how did the sensor use the event to customize the message and channel from the HTTP request? We will see that in the next section.
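For orientation before the parameter details, the default trigger in slack-trigger.yaml boils down to roughly the fragment below. This is a trimmed sketch rather than the full stock manifest, wired to the slack-secret created earlier and the defaults mentioned in the note above:

- template:
    name: slack-trigger
    slack:
      slackToken:            # OAuth token from the slack-secret created above
        name: slack-secret
        key: token
      channel: general       # default channel from the note above
      message: hello world   # default message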
Parameterization \u00b6 The Slack trigger parameters have the following structure, parameters: - src: dependencyName: test-dep dataKey: body.channel dest: slack.channel - src: dependencyName: test-dep contextKey: body.message dest: slack.message The src is the source of the event. It contains: dependencyName : the name of the event dependency to extract the event from. dataKey : extracts a particular key-value from the event's data. contextKey : extracts a particular key-value from the event's context. The dest is the destination key within the result payload. So, the above trigger parameters will generate a request payload like, { \"channel\": \"channel_to_send_message\", \"message\": \"message_to_send_to_channel\" } Note : If you define both the contextKey and dataKey within a parameter item, then the dataKey takes precedence. You can create any parameter structure you want. To get more info on how to generate complex event payloads, take a look at this library . Other Capabilities \u00b6 Configuring the sender of the Slack message: \u00b6 - template: name: slack-trigger slack: sender: username: \"Cool Robot\" icon: \":robot_face:\" # emoji or url, e.g. https://example.com/image.png Sending messages to Slack threads: \u00b6 - template: name: slack - trigger slack: thread: messageAggregationKey: \"abcdefg\" # aggregate messages by some key to send them to the same Slack thread broadcastMessageToChannel: true # also broadcast the message from the thread to the channel Sending attachments using Slack Attachments API : \u00b6 - template: name: slack-trigger slack: message: \"hello world!\" attachments: | [{ \"title\": \"Attachment1!\", \"title_link\": \"https://argoproj.github.io/argo-events/sensors/triggers/slack-trigger/\", \"color\": \"#18be52\", \"fields\": [{ \"title\": \"Hello1\", \"value\": \"Hello World1\", \"short\": true }, { \"title\": \"Hello2\", \"value\": \"Hello World2\", \"short\": true }] }, { \"title\": \"Attachment2!\", \"title_link\": \"https://argoproj.github.io/argo-events/sensors/triggers/slack-trigger/\", \"color\": \"#18be52\", \"fields\": [{ \"title\": \"Hello1\", \"value\": \"Hello World1\", \"short\": true }, { \"title\": \"Hello2\", \"value\": \"Hello World2\", \"short\": true }] }] Sending blocks using Slack Blocks API : \u00b6 - template : name : slack - trigger slack : blocks : | [{ \"type\" : \"actions\" , \"block_id\" : \"actionblock789\" , \"elements\" : [{ \"type\" : \"datepicker\" , \"action_id\" : \"datepicker123\" , \"initial_date\" : \"1990-04-28\" , \"placeholder\" : { \"type\" : \"plain_text\" , \"text\" : \"Select a date\" } }, { \"type\" : \"overflow\" , \"options\" : [{ \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-0\" }, { \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-1\" }, { \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-2\" }, { \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-3\" }, { \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-4\" } ], \"action_id\" : \"overflow\" }, { \"type\" : \"button\" , \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"Click Me\" }, \"value\" : \"click_me_123\" , \"action_id\" : \"button\" } ] }] The complete specification of the Slack trigger is available here .","title":"Slack
Trigger"},{"location":"sensors/triggers/slack-trigger/#slack-trigger","text":"The Slack trigger is used to send a custom message to a desired Slack channel in a Slack workspace. The intended use is for notifications for a build pipeline, but can be used for any notification scenario.","title":"Slack Trigger"},{"location":"sensors/triggers/slack-trigger/#prerequisite","text":"Deploy the eventbus in the namespace. Make sure to have a Slack workspace setup you wish to send a message to. Create a webhook event-source. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Set up port-forwarding to expose the http server. We will use port-forwarding here. kubectl port-forward -n argo-events 12000:12000","title":"Prerequisite"},{"location":"sensors/triggers/slack-trigger/#create-a-slack-app","text":"We need to create a Slack App which will send messages to your Slack Workspace. We will add OAuth Permissions and add the OAuth token to the k8s cluster via a secret. Create a Slack app by clicking Create New App at the Slack API Page . Name your app and choose your intended Slack Workspace. Navigate to your app, then to Features > OAuth & Permissions . Scroll down to Scopes and add the scopes channels:join , channels:read , groups:read and chat:write to the Bot Token Scopes . Scroll to the top of the OAuth & Permissions page and click Install App to Workspace and follow the install Wizard. You should land back on the OAuth & Permissions page. Copy your app's OAuth Access Token. This will allow the trigger to act on behalf of your newly created Slack app. Create a kubernetes secret with the OAuth token in your cluster. kubectl create secret generic slack-secret --from-literal=token=$SLACK_OAUTH_TOKEN","title":"Create a Slack App"},{"location":"sensors/triggers/slack-trigger/#slack-trigger_1","text":"We will set up a basic slack trigger and send a default message, and then a dynamic custom message. Create a sensor with Slack trigger. We will discuss the trigger details in the following sections. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/slack-trigger.yaml Send a http request to the event-source-pod to fire the Slack trigger. curl -d '{\"text\":\"Hello, World!\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Note : The default slack-trigger will send the message \"hello world\" to the #general channel. You may change the default message and channel in slack-trigger.yaml under triggers.slack.channel and triggers.slack.message. Alternatively, you can dynamically determine the channel and message based on parameterization of your event. curl - d '{\"channel\":\"random\",\"message\":\"test message\"}' - H \"Content-Type: application/json\" - X POST http : // localhost : 12000 / example Great! But, how did the sensor use the event to customize the message and channel from the http request? We will see that in next section.","title":"Slack Trigger"},{"location":"sensors/triggers/slack-trigger/#parameterization","text":"The slack trigger parameters have the following structure, parameters: - src: dependencyName: test-dep dataKey: body.channel dest: slack.channel - src: dependencyName: test-dep contextKey: body.message dest: slack.message The src is the source of event. It contains, dependencyName : name of the event dependency to extract the event from. dataKey : to extract a particular key-value from event's data. 
contextKey : to extract a particular key-value from event' context. The dest is the destination key within the result payload. So, the above trigger parameters will generate a request payload as, { \"channel\": \"channel_to_send_message\", \"message\": \"message_to_send_to_channel\" } Note : If you define both the contextKey and dataKey within a parameter item, then the dataKey takes the precedence. You can create any parameter structure you want. To get more info on how to generate complex event payloads, take a look at this library .","title":"Parameterization"},{"location":"sensors/triggers/slack-trigger/#other-capabilities","text":"","title":"Other Capabilities"},{"location":"sensors/triggers/slack-trigger/#configuring-the-sender-of-the-slack-message","text":"- template: name: slack-trigger slack: sender: username: \"Cool Robot\" icon: \":robot_face:\" # emoji or url, e.g. https://example.com/image.png","title":"Configuring the sender of the Slack message:"},{"location":"sensors/triggers/slack-trigger/#sending-messages-to-slack-threads","text":"- template: name: slack - trigger slack: thread: messageAggregationKey: \"abcdefg\" # aggregate message by some key to send them to the same Slack thread broadcastMessageToChannel: true # also broadcast the message from the thread to the channel","title":"Sending messages to Slack threads:"},{"location":"sensors/triggers/slack-trigger/#sending-attachments-using-slack-attachments-api","text":"- template: name: slack-trigger slack: message: \"hello world!\" attachments: | [{ \"title\": \"Attachment1!\", \"title_link\": \"https://argoproj.github.io/argo-events/sensors/triggers/slack-trigger/\", \"color\": \"#18be52\", \"fields\": [{ \"title\": \"Hello1\", \"value\": \"Hello World1\", \"short\": true }, { \"title\": \"Hello2\", \"value\": \"Hello World2\", \"short\": true }] }, { \"title\": \"Attachment2!\", \"title_link\": \"https://argoproj.github.io/argo-events/sensors/triggers/slack-trigger/\", \"color\": \"#18be52\", \"fields\": [{ \"title\": \"Hello1\", \"value\": \"Hello World1\", \"short\": true }, { \"title\": \"Hello2\", \"value\": \"Hello World2\", \"short\": true }] }]","title":"Sending attachments using Slack Attachments API:"},{"location":"sensors/triggers/slack-trigger/#sending-blocks-using-slack-blocks-api","text":"- template : name : slack - trigger slack : blocks : | [{ \"type\" : \"actions\" , \"block_id\" : \"actionblock789\" , \"elements\" : [{ \"type\" : \"datepicker\" , \"action_id\" : \"datepicker123\" , \"initial_date\" : \"1990-04-28\" , \"placeholder\" : { \"type\" : \"plain_text\" , \"text\" : \"Select a date\" } }, { \"type\" : \"overflow\" , \"options\" : [{ \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-0\" }, { \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-1\" }, { \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-2\" }, { \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-3\" }, { \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"*this is plain_text text*\" }, \"value\" : \"value-4\" } ], \"action_id\" : \"overflow\" }, { \"type\" : \"button\" , \"text\" : { \"type\" : \"plain_text\" , \"text\" : \"Click Me\" }, \"value\" : \"click_me_123\" , \"action_id\" : \"button\" } ] }] The complete specification of Slack trigger is available here .","title":"Sending blocks using Slack Blocks 
API:"},{"location":"tutorials/01-introduction/","text":"Introduction \u00b6 In the tutorials, we will cover every aspect of Argo Events and demonstrate how you can leverage these features to build an event driven workflow pipeline. All the concepts you will learn in this tutorial and subsequent ones can be applied to any type of event-source. Prerequisites \u00b6 Follow the installation guide to set up Argo Events. Make sure to configure the Argo Workflow controller to listen to workflow objects created in the argo-events namespace. (See this link.) The Workflow Controller will need to be installed either in a cluster-scope configuration (i.e. no \"--namespaced\" argument) so that it has visibility to all namespaces, or with \"--managed-namespace\" set to define \"argo-events\" as a namespace it has visibility to. To deploy Argo Workflows with a cluster-scope configuration you can use this installation yaml file: kubectl apply - n argo - f https : // github . com / argoproj / argo - workflows / releases / latest / download / install . yaml Make sure to read the concepts behind eventbus . sensor . event source . Follow the instructions to create a Service Account operate-workflow-sa with proper privileges, and make sure the Service Account used by Workflows (here we use default in the tutorials for demonstration purposes) has proper RBAC settings. Get Started \u00b6 We are going to set up a sensor and event-source for webhook. The goal is to trigger an Argo workflow upon an HTTP POST request. Let's set up the eventbus. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml Create the webhook event source. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create the webhook sensor. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/webhook.yaml If the commands are executed successfully, the eventbus, event-source and sensor pods will get created. You will also notice that a service is created for the event-source. Expose the event-source pod via Ingress, OpenShift Route or port forward to consume requests over HTTP. kubectl -n argo-events port-forward 12000:12000 Use either Curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see an Argo workflow being created. kubectl -n argo-events get wf Make sure the workflow pod ran successfully. argo logs - n argo - events @latest This should result in something similar to what is below. _________________________________________ / { \"context\" :{ \"type\" : \"webhook\" , \"specVersi \\ | on\" : \"0.3\" , \"source\" : \"webhook\" , \"e | | ventID\" : \"38376665363064642d343336352d34 | | 3035372d393766662d366234326130656232343 | | 337\" , \"time\" : \"2020-01-11T16:55:42.996636 | | Z\" , \"dataContentType\" : \"application/json\" | | , \"subject\" : \"example\" }, \"data\" : \"eyJoZWFkZ | | XIiOnsiQWNjZXB0IjpbIiovKiJdLCJDb250ZW50 | | LUxlbmd0aCI6WyIzOCJdLCJDb250ZW50LVR5cGU | | iOlsiYXBwbGljYXRpb24vanNvbiJdLCJVc2VyLU | | FnZW50IjpbImN1cmwvNy41NC4wIl19LCJib2R5I | | jp7Im1lc3NhZ2UiOiJ0aGlzIGlzIG15IGZpcnN0 | \\ IHdlYmhvb2sifX0=\" } / ----------------------------------------- \\ \\ \\ ## .
## ## ## == ## ## ## ## == = / \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" ___ / == = ~~~ { ~~ ~~~~ ~~~ ~~~~ ~~ ~ / == = - ~~~ \\ ______ o __ / \\ \\ __ / \\ ____ \\ ______ / Note: You will see the message printed in the workflow logs contains both the event context and data, with data being base64 encoded. In later sections, we will see how to extract particular key-value from event context or data and pass it to the workflow as arguments. Troubleshoot \u00b6 If you don't see the event-source and sensor pod in argo-events namespace, Inspect the event-source. kubectl -n argo-events get eventsource event-source-object-name -o yaml Inspect the sensor. kubectl -n argo-events get sensor sensor-object-name -o yaml and look for any errors within the Status . Make sure the correct Role and RoleBindings are applied to the service account and there are no errors in both event-source and sensor controller. Check the logs of event-source and sensor controller. Make sure the controllers have processed the event-source and sensor objects and there are no errors. Raise an issue on GitHub or post a question on argo-events slack channel.","title":"Introduction"},{"location":"tutorials/01-introduction/#introduction","text":"In the tutorials, we will cover every aspect of Argo Events and demonstrate how you can leverage these features to build an event driven workflow pipeline. All the concepts you will learn in this tutorial and subsequent ones can be applied to any type of event-source.","title":"Introduction"},{"location":"tutorials/01-introduction/#prerequisites","text":"Follow the installation guide to set up the Argo Events. Make sure to configure Argo Workflow controller to listen to workflow objects created in argo-events namespace. (See this link.) The Workflow Controller will need to be installed either in a cluster-scope configuration (i.e. no \"--namespaced\" argument) so that it has visibility to all namespaces, or with \"--managed-namespace\" set to define \"argo-events\" as a namespace it has visibility to. To deploy Argo Workflows with a cluster-scope configuration you can use this installation yaml file: kubectl apply - n argo - f https : // github . com / argoproj / argo - workflows / releases / latest / download / install . yaml Make sure to read the concepts behind eventbus . sensor . event source . Follow the instruction to create a Service Account operate-workflow-sa with proper privileges, and make sure the Service Account used by Workflows (here we use default in the tutorials for demonstration purpose) has proper RBAC settings.","title":"Prerequisites"},{"location":"tutorials/01-introduction/#get-started","text":"We are going to set up a sensor and event-source for webhook. The goal is to trigger an Argo workflow upon an HTTP Post request. Let' set up the eventbus. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml Create the webhook event source. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml Create the webhook sensor. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/webhook.yaml If the commands are executed successfully, the eventbus, event-source and sensor pods will get created. You will also notice that a service is created for the event-source. Expose the event-source pod via Ingress, OpenShift Route or port forward to consume requests over HTTP. 
kubectl -n argo-events port-forward 12000:12000 Use either Curl or Postman to send a post request to the http://localhost:12000/example . curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see an Argo workflow being created. kubectl -n argo-events get wf Make sure the workflow pod ran successfully. argo logs - n argo - events @latest Should result in something similar to what is below. _________________________________________ / { \"context\" :{ \"type\" : \"webhook\" , \"specVersi \\ | on\" : \"0.3\" , \"source\" : \"webhook\" , \"e | | ventID\" : \"38376665363064642d343336352d34 | | 3035372d393766662d366234326130656232343 | | 337\" , \"time\" : \"2020-01-11T16:55:42.996636 | | Z\" , \"dataContentType\" : \"application/json\" | | , \"subject\" : \"example\" }, \"data\" : \"eyJoZWFkZ | | XIiOnsiQWNjZXB0IjpbIiovKiJdLCJDb250ZW50 | | LUxlbmd0aCI6WyIzOCJdLCJDb250ZW50LVR5cGU | | iOlsiYXBwbGljYXRpb24vanNvbiJdLCJVc2VyLU | | FnZW50IjpbImN1cmwvNy41NC4wIl19LCJib2R5I | | jp7Im1lc3NhZ2UiOiJ0aGlzIGlzIG15IGZpcnN0 | \\ IHdlYmhvb2sifX0=\" } / ----------------------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## == = / \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" ___ / == = ~~~ { ~~ ~~~~ ~~~ ~~~~ ~~ ~ / == = - ~~~ \\ ______ o __ / \\ \\ __ / \\ ____ \\ ______ / Note: You will see the message printed in the workflow logs contains both the event context and data, with data being base64 encoded. In later sections, we will see how to extract particular key-value from event context or data and pass it to the workflow as arguments.","title":"Get Started"},{"location":"tutorials/01-introduction/#troubleshoot","text":"If you don't see the event-source and sensor pod in argo-events namespace, Inspect the event-source. kubectl -n argo-events get eventsource event-source-object-name -o yaml Inspect the sensor. kubectl -n argo-events get sensor sensor-object-name -o yaml and look for any errors within the Status . Make sure the correct Role and RoleBindings are applied to the service account and there are no errors in both event-source and sensor controller. Check the logs of event-source and sensor controller. Make sure the controllers have processed the event-source and sensor objects and there are no errors. Raise an issue on GitHub or post a question on argo-events slack channel.","title":"Troubleshoot"},{"location":"tutorials/02-parameterization/","text":"Parameterization \u00b6 In the previous section, we saw how to set up a basic webhook event-source and sensor. The trigger template had parameters set in the sensor object, and the workflow was able to print the event payload. In this tutorial, we will dig deeper into different types of parameterization, how to extract particular key-value from event payload and how to use default values if certain key is not available within event payload. Trigger Resource Parameterization \u00b6 If you take a closer look at the Sensor object, you will notice it contains a list of triggers. Each Trigger contains the template that defines the context of the trigger and actual resource that we expect the sensor to execute. In the previous section, the resource within the trigger template was an Argo workflow. This subsection deals with how to parameterize the resource within trigger template with the event payload. Prerequisites \u00b6 Make sure to have the basic webhook event-source and sensor set up. Follow the introduction tutorial if haven't done already. 
Webhook Event Payload \u00b6 The Webhook event-source consumes events through HTTP requests and transforms them into CloudEvents. The structure of the event the Webhook sensor receives from the event-source over the eventbus looks like the following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } } Context : This is the CloudEvent context and it is populated by the event-source regardless of the type of HTTP request. Data : Data contains the following fields. Header : The header within the event data contains the headers of the HTTP request that was dispatched to the event-source. The event-source extracts the headers from the request and puts them in the header within the event data . Body : This is the request payload from the HTTP request. Event Context \u00b6 Now that we have an understanding of the structure of the event the webhook sensor receives from the event-source over the eventbus, let's see how we can use the event context to parameterize the Argo workflow. Update the Webhook Sensor and add the contextKey for the parameter at index 0. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-01.yaml Send an HTTP request to the event-source pod. curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Inspect the output of the Argo workflow that was created. argo logs name_of_the_workflow You will see the following output, _________ < webhook > --------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ We have successfully extracted the type key within the event context and parameterized the workflow to print the value of the type . Event Data \u00b6 Now, it is time to use the event data and parameterize the Argo workflow trigger. We will extract the message from the request payload and get the Argo workflow to print the message.
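A minimal sketch of the dataKey parameter used for this step, mirroring the sensor-02 example from the full walkthrough (test-dep and body.message come from the webhook setup above):

parameters:
  - src:
      dependencyName: test-dep
      dataKey: body.message   # extract the message field from the request body
    dest: spec.arguments.parameters.0.value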
Default Values \u00b6 Each parameter comes with an option to configure the default value. This is specially important when the key you defined in the parameter doesn't exist in the event. Update the Webhook Sensor and add the value for the parameter at index 0. We will also update the dataKey to an unknown event key. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-03.yaml Send a HTTP request to the event-source pod. curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Inspect the output of the Argo workflow that was created. argo logs name_of_the_workflow You will see the following output, _______________________ < wow! a default value. > ----------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Sprig Templates \u00b6 The sprig template exposed through contextTemplate and dataTemplate lets you alter the event context and event data before it gets applied to the trigger via parameters . Take a look at the example defined here , it contains the parameters as follows, parameters : # Retrieve the 'message' key from the payload - src : dependencyName : test - dep dataTemplate : \"{{ .Input.body.message | title }}\" dest : spec . arguments . parameters . 0. value # Title case the context subject - src : dependencyName : test - dep contextTemplate : \"{{ .Input.subject | title }}\" dest : spec . arguments . parameters . 1. value # Retrieve the 'name' key from the payload, remove all whitespace and lowercase it. - src : dependencyName : test - dep dataTemplate : \"{{ .Input.body.name | nospace | lower }}-\" dest : metadata . generateName operation : append Consider the event the sensor received has format like, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : { \"name\" : \"foo bar\" , \"message\" : \"hello there!!\" }, } } The parameters are transformed as, The first parameter extracts the body.message from event data and applies title filter which basically capitalizes the first letter and replaces the spec.arguments.parameters.0.value . The second parameter extracts the subject from the event context and again applies title filter and replaces the spec.arguments.parameters.1.value . The third parameter extracts the body.name from the event data, applies nospace filter which removes all white spaces and then lower filter which lowercases the text and appends it to metadata.generateName . Send a curl request to event-source as follows, curl -d '{\"name\":\"foo bar\", \"message\": \"hello there!!\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example and you will see an Argo workflow being sprung with name like webhook-foobar-xxxxx . Check the output of the workflow, it should print something like, ____________________________ < Hello There!! from Example > ---------------------------- \\ \\ \\ ## . 
## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Operations \u00b6 Sometimes you need the ability to append or prepend a parameter value to an existing value in trigger resource. This is where the operation field within a parameter comes handy. Update the Webhook Sensor and add the operation in the parameter at index 0. We will prepend the message to an existing value. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-04.yaml Send a HTTP request to the event-source. curl -d '{\"message\":\"hey!!\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Inspect the output of the Argo workflow that was created. argo logs name_of_the_workflow You will see the following output, __________________ < hey!!hello world > ------------------ \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Trigger Template Parameterization \u00b6 The parameterization you saw above deals with the trigger resource, but sometimes you need to parameterize the trigger template itself. This comes handy when you have the trigger resource stored on some external source like S3, Git, etc. and you need to replace the url of the source on the fly in trigger template. Imagine a scenario where you want to parameterize the parameters of trigger to parameterize the trigger resource. What?... The sensor you have been using in this tutorial has one parameter defined in the trigger resource under k8s . We will parameterize that parameter by applying a parameter at the trigger template level. Update the Webhook Sensor and add parameters at trigger level. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-05.yaml Send a HTTP request to the event-source. curl -d '{\"dependencyName\":\"test-dep\", \"dataKey\": \"body.message\", \"dest\": \"spec.arguments.parameters.0.value\", \"message\": \"amazing!!\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Inspect the output of the Argo workflow that was created. argo logs name_of_the_workflow You will see the following output, ___________ < amazing!! > ----------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Great!! You have now learned how to apply parameters at trigger resource and template level. Keep in mind that you can apply default values and operations like prepend and append for trigger template parameters as well.","title":"Parameterization"},{"location":"tutorials/02-parameterization/#parameterization","text":"In the previous section, we saw how to set up a basic webhook event-source and sensor. The trigger template had parameters set in the sensor object, and the workflow was able to print the event payload. 
In this tutorial, we will dig deeper into different types of parameterization, how to extract particular key-value from event payload and how to use default values if certain key is not available within event payload.","title":"Parameterization"},{"location":"tutorials/02-parameterization/#trigger-resource-parameterization","text":"If you take a closer look at the Sensor object, you will notice it contains a list of triggers. Each Trigger contains the template that defines the context of the trigger and actual resource that we expect the sensor to execute. In the previous section, the resource within the trigger template was an Argo workflow. This subsection deals with how to parameterize the resource within trigger template with the event payload.","title":"Trigger Resource Parameterization"},{"location":"tutorials/02-parameterization/#prerequisites","text":"Make sure to have the basic webhook event-source and sensor set up. Follow the introduction tutorial if haven't done already.","title":"Prerequisites"},{"location":"tutorials/02-parameterization/#webhook-event-payload","text":"Webhook event-source consumes events through HTTP requests and transforms them into CloudEvents. The structure of the event the Webhook sensor receives from the event-source over the eventbus looks like following, { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"header\" : {}, \"body\" : {}, } } Context : This is the CloudEvent context and it is populated by the event-source regardless of type of HTTP request. Data : Data contains following fields. Header : The header within event data contains the headers in the HTTP request that was dispatched to the event-source. The event-source extracts the headers from the request and put it in the header within event data . Body : This is the request payload from the HTTP request.","title":"Webhook Event Payload"},{"location":"tutorials/02-parameterization/#event-context","text":"Now that we have an understanding of the structure of the event the webhook sensor receives from the event-source over the eventbus, lets see how we can use the event context to parameterize the Argo workflow. Update the Webhook Sensor and add the contextKey for the parameter at index 0. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-01.yaml Send a HTTP request to the event-source pod. curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Inspect the output of the Argo workflow that was created. argo logs name_of_the_workflow You will see the following output, _________ < webhook > --------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ We have successfully extracted the type key within the event context and parameterized the workflow to print the value of the type .","title":"Event Context"},{"location":"tutorials/02-parameterization/#event-data","text":"Now, it is time to use the event data and parameterize the Argo workflow trigger. We will extract the message from request payload and get the Argo workflow to print the message. 
Update the Webhook Sensor and add the dataKey in the parameter at index 0. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-02.yaml Send a HTTP request to the event-source pod. curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Inspect the output of the Argo workflow that was created. argo logs name_of_the_workflow You will see the following output, __________________________ < this is my first webhook > -------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Yay!! The Argo workflow printed the message. You can add however many number of parameters to update the trigger resource on the fly. Note : If you define both the contextKey and dataKey within a parameter, then the dataKey takes the precedence. Note : When useRawData is not specified or explicitly set to false, the parameter will resolve to a string type. When useRawData is set to true, a number, boolean, json or string parameter may be resolved.","title":"Event Data"},{"location":"tutorials/02-parameterization/#default-values","text":"Each parameter comes with an option to configure the default value. This is specially important when the key you defined in the parameter doesn't exist in the event. Update the Webhook Sensor and add the value for the parameter at index 0. We will also update the dataKey to an unknown event key. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-03.yaml Send a HTTP request to the event-source pod. curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Inspect the output of the Argo workflow that was created. argo logs name_of_the_workflow You will see the following output, _______________________ < wow! a default value. > ----------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/","title":"Default Values"},{"location":"tutorials/02-parameterization/#sprig-templates","text":"The sprig template exposed through contextTemplate and dataTemplate lets you alter the event context and event data before it gets applied to the trigger via parameters . Take a look at the example defined here , it contains the parameters as follows, parameters : # Retrieve the 'message' key from the payload - src : dependencyName : test - dep dataTemplate : \"{{ .Input.body.message | title }}\" dest : spec . arguments . parameters . 0. value # Title case the context subject - src : dependencyName : test - dep contextTemplate : \"{{ .Input.subject | title }}\" dest : spec . arguments . parameters . 1. value # Retrieve the 'name' key from the payload, remove all whitespace and lowercase it. - src : dependencyName : test - dep dataTemplate : \"{{ .Input.body.name | nospace | lower }}-\" dest : metadata . 
{"location":"tutorials/02-parameterization/#sprig-templates","text":"The Sprig template exposed through contextTemplate and dataTemplate lets you alter the event context and event data before they get applied to the trigger via parameters . Take a look at the example defined here ; it contains parameters as follows: parameters : # Retrieve the 'message' key from the payload - src : dependencyName : test-dep dataTemplate : \"{{ .Input.body.message | title }}\" dest : spec.arguments.parameters.0.value # Title case the context subject - src : dependencyName : test-dep contextTemplate : \"{{ .Input.subject | title }}\" dest : spec.arguments.parameters.1.value # Retrieve the 'name' key from the payload, remove all whitespace and lowercase it. - src : dependencyName : test-dep dataTemplate : \"{{ .Input.body.name | nospace | lower }}-\" dest : metadata.generateName operation : append Consider that the event the sensor received has a format like: { \"context\" : { \"type\" : \"type_of_event_source\" , \"specversion\" : \"cloud_events_version\" , \"source\" : \"name_of_the_event_source\" , \"id\" : \"unique_event_id\" , \"time\" : \"event_time\" , \"datacontenttype\" : \"type_of_data\" , \"subject\" : \"name_of_the_configuration_within_event_source\" }, \"data\" : { \"body\" : { \"name\" : \"foo bar\" , \"message\" : \"hello there!!\" } } } The parameters are transformed as follows: The first parameter extracts body.message from the event data, applies the title filter, which capitalizes the first letter of each word, and replaces spec.arguments.parameters.0.value . The second parameter extracts the subject from the event context, again applies the title filter, and replaces spec.arguments.parameters.1.value . The third parameter extracts body.name from the event data, applies the nospace filter, which removes all whitespace, then the lower filter, which lowercases the text, and appends the result to metadata.generateName . Send a curl request to the event-source as follows: curl -d '{\"name\":\"foo bar\", \"message\": \"hello there!!\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example and you will see an Argo workflow created with a name like webhook-foobar-xxxxx . Check the output of the workflow; it should print something like: ____________________________ < Hello There!! from Example > ---------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/","title":"Sprig Templates"},{"location":"tutorials/02-parameterization/#operations","text":"Sometimes you need the ability to append or prepend a parameter value to an existing value in the trigger resource. This is where the operation field within a parameter comes in handy. Update the Webhook Sensor and add the operation in the parameter at index 0. We will prepend the message to an existing value. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-04.yaml Send an HTTP request to the event-source. curl -d '{\"message\":\"hey!!\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Inspect the output of the Argo workflow that was created. argo logs name_of_the_workflow You will see the following output: __________________ < hey!!hello world > ------------------ \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/","title":"Operations"},
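Note that the operation field sits next to dest, not inside src. A hedged sketch of the prepend parameter described in the Operations section above (dependency name and destination path are assumptions for illustration):

parameters:
  - src:
      dependencyName: test-dep # assumed dependency name
      dataKey: body.message
    dest: spec.arguments.parameters.0.value
    operation: prepend # prepend the event value to the existing "hello world"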
{"location":"tutorials/02-parameterization/#trigger-template-parameterization","text":"The parameterization you saw above deals with the trigger resource, but sometimes you need to parameterize the trigger template itself. This comes in handy when you have the trigger resource stored on some external source like S3, Git, etc. and you need to replace the URL of the source on the fly in the trigger template. Imagine a scenario where you want to parameterize the parameters of a trigger in order to parameterize the trigger resource. What?... The sensor you have been using in this tutorial has one parameter defined in the trigger resource under k8s . We will parameterize that parameter by applying a parameter at the trigger template level. Update the Webhook Sensor and add parameters at the trigger level. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-05.yaml Send an HTTP request to the event-source. curl -d '{\"dependencyName\":\"test-dep\", \"dataKey\": \"body.message\", \"dest\": \"spec.arguments.parameters.0.value\", \"message\": \"amazing!!\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Inspect the output of the Argo workflow that was created. argo logs name_of_the_workflow You will see the following output: ___________ < amazing!! > ----------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Great!! You have now learned how to apply parameters at the trigger resource and template levels. Keep in mind that you can apply default values and operations like prepend and append for trigger template parameters as well.","title":"Trigger Template Parameterization"},{"location":"tutorials/03-trigger-sources/","text":"Trigger Sources \u00b6 A trigger source is the source of the trigger resource. It can be either an external source such as Git , S3 , K8s Configmap , File , or any valid URL that hosts the resource, or an internal resource which is defined in the sensor object itself, like Inline or Resource . In the previous sections, you have been dealing with the Resource trigger source. In this tutorial, we will explore other trigger sources. Prerequisites \u00b6 The Webhook event-source is already set up. Git \u00b6 The Git trigger source refers to a K8s resource stored in Git. The specification for the Git source is available here . In order to fetch data from Git, you need to set up the private SSH key in the sensor. If you don't have SSH keys available, create them following this guide . Create a K8s secret that holds the SSH keys. kubectl -n argo-events create secret generic git-ssh --from-file=key=.ssh/ Create a K8s secret that holds the known hosts. kubectl -n argo-events create secret generic git-known-hosts --from-file=ssh_known_hosts=.ssh/known_hosts Create a sensor with the Git trigger source and refer it to the hello world workflow stored in the Argo Git project. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/03-trigger-sources/sensor-git.yaml Use either Curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see an Argo workflow being created. kubectl -n argo-events get wf
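A sensor trigger that pulls its resource from Git looks roughly like the sketch below. The repository URL, file path, and branch are placeholders; only the secret names match the git-ssh secret created above, and the exact field set should be checked against the Git source specification.

k8s:
  operation: create
  source:
    git:
      url: git@github.com:argoproj/argo-workflows.git # placeholder repository
      cloneDirectory: /git/argoproj
      sshKeySecret:
        name: git-ssh
        key: key
      filePath: examples/hello-world.yaml # placeholder path to the workflow manifest
      branch: master # placeholder branch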
S3 \u00b6 You can refer to a K8s resource stored on an S3-compliant store as the trigger source. For this tutorial, let's set up a Minio server, which is an S3-compliant store. Create a K8s secret called artifacts-minio that holds your Minio access key and secret key. The access key must be stored under the accesskey key and the secret key under the secretkey key. Follow the steps described here to set up the Minio server. Make sure a service is available to expose the Minio server. Create a bucket called workflows and store a basic hello world Argo workflow with the key name hello-world.yaml . Create the sensor with the trigger source as S3. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/03-trigger-sources/sensor-minio.yaml Use either Curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see an Argo workflow being created. kubectl -n argo-events get wf
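Here is a hedged sketch of what the S3 trigger source in sensor-minio.yaml might look like, assuming the bucket, secret, and service names from the steps above; verify field names against the S3 source specification.

k8s:
  operation: create
  source:
    s3:
      bucket:
        name: workflows
        key: hello-world.yaml
      endpoint: minio-service.argo-events:9000
      insecure: true # assumes the Minio service is exposed without TLS
      accessKey:
        name: artifacts-minio
        key: accesskey
      secretKey:
        name: artifacts-minio
        key: secretkey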
K8s Configmap \u00b6 A K8s configmap can be treated as a trigger source if needed. Let's create a configmap called trigger-store . kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/03-trigger-sources/trigger-store.yaml Create a sensor with the trigger source as configmap and refer it to the trigger-store . kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/03-trigger-sources/sensor-cm.yaml Use either Curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see an Argo workflow being created. kubectl -n argo-events get wf File & URL \u00b6 File and URL trigger sources are pretty self-explanatory. The example sensors are available under the examples/sensors folder.","title":"Trigger Sources"},{"location":"tutorials/03-trigger-sources/#trigger-sources","text":"A trigger source is the source of the trigger resource. It can be either an external source such as Git , S3 , K8s Configmap , File , or any valid URL that hosts the resource, or an internal resource which is defined in the sensor object itself, like Inline or Resource . In the previous sections, you have been dealing with the Resource trigger source. In this tutorial, we will explore other trigger sources.","title":"Trigger Sources"},{"location":"tutorials/03-trigger-sources/#prerequisites","text":"The Webhook event-source is already set up.","title":"Prerequisites"},{"location":"tutorials/03-trigger-sources/#git","text":"The Git trigger source refers to a K8s resource stored in Git. The specification for the Git source is available here . In order to fetch data from Git, you need to set up the private SSH key in the sensor. If you don't have SSH keys available, create them following this guide . Create a K8s secret that holds the SSH keys. kubectl -n argo-events create secret generic git-ssh --from-file=key=.ssh/ Create a K8s secret that holds the known hosts. kubectl -n argo-events create secret generic git-known-hosts --from-file=ssh_known_hosts=.ssh/known_hosts Create a sensor with the Git trigger source and refer it to the hello world workflow stored in the Argo Git project. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/03-trigger-sources/sensor-git.yaml Use either Curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see an Argo workflow being created. kubectl -n argo-events get wf","title":"Git"},{"location":"tutorials/03-trigger-sources/#s3","text":"You can refer to a K8s resource stored on an S3-compliant store as the trigger source. For this tutorial, let's set up a Minio server, which is an S3-compliant store. Create a K8s secret called artifacts-minio that holds your Minio access key and secret key. The access key must be stored under the accesskey key and the secret key under the secretkey key. Follow the steps described here to set up the Minio server. Make sure a service is available to expose the Minio server. Create a bucket called workflows and store a basic hello world Argo workflow with the key name hello-world.yaml . Create the sensor with the trigger source as S3. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/03-trigger-sources/sensor-minio.yaml Use either Curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see an Argo workflow being created. kubectl -n argo-events get wf","title":"S3"},{"location":"tutorials/03-trigger-sources/#k8s-configmap","text":"A K8s configmap can be treated as a trigger source if needed. Let's create a configmap called trigger-store . kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/03-trigger-sources/trigger-store.yaml Create a sensor with the trigger source as configmap and refer it to the trigger-store . kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/03-trigger-sources/sensor-cm.yaml Use either Curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see an Argo workflow being created. kubectl -n argo-events get wf","title":"K8s Configmap"},{"location":"tutorials/03-trigger-sources/#file-url","text":"File and URL trigger sources are pretty self-explanatory. The example sensors are available under the examples/sensors folder.","title":"File & URL"},
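For the configmap variant, the trigger source simply names the configmap and the key that holds the manifest. A minimal sketch, where the key name hello-world.yaml is an assumption about how trigger-store.yaml is laid out:

k8s:
  operation: create
  source:
    configmap:
      name: trigger-store
      key: hello-world.yaml # assumed key under which the workflow manifest is stored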
{"location":"tutorials/04-standard-k8s-resources/","text":"Trigger Standard K8s Resources \u00b6 In the previous sections, you saw how to trigger Argo workflows. In this tutorial, you will see how to trigger a Pod and a Deployment. Note: You can trigger any standard Kubernetes object. Having the ability to trigger standard Kubernetes resources is quite powerful as it provides an avenue to set up pipelines for existing workloads. Prerequisites \u00b6 Make sure that the service account used by the Sensor has the necessary permissions to create the Kubernetes resource of your choice. We use k8s-resource-sa for the examples below; it should be bound to a Role like the following. apiVersion : rbac.authorization.k8s.io/v1 kind : Role metadata : name : create-deploy-pod-role rules : - apiGroups : - \"\" resources : - pods verbs : - create - apiGroups : - apps resources : - deployments verbs : - create The Webhook event-source is already set up. Pod \u00b6 Create a sensor with a K8s trigger. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/04-standard-k8s-resources/sensor-pod.yaml Use either Curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see a pod being created. kubectl -n argo-events get po After the pod has completed, inspect its logs; you will see something similar to the below. _________________________________________ / { \"context\" :{ \"type\" : \"webhook\" , \"specVersi \\ | on\" : \"0.3\" , \"source\" : \"webhook\" , \"e | | ventID\" : \"30306463666539362d346666642d34 | | 3336332d383861312d336538363333613564313 | | 932\" , \"time\" : \"2020-01-11T21:23:07.682961 | | Z\" , \"dataContentType\" : \"application/json\" | | , \"subject\" : \"example\" }, \"data\" : \"eyJoZWFkZ | | XIiOnsiQWNjZXB0IjpbIiovKiJdLCJDb250ZW50 | | LUxlbmd0aCI6WyIxOSJdLCJDb250ZW50LVR5cGU | | iOlsiYXBwbGljYXRpb24vanNvbiJdLCJVc2VyLU | | FnZW50IjpbImN1cmwvNy41NC4wIl19LCJib2R5I | \\ jp7Im1lc3NhZ2UiOiJoZXkhISJ9fQ==\" } / ----------------------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## == = / \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" ___ / == = ~~~ { ~~ ~~~~ ~~~ ~~~~ ~~ ~ / == = - ~~~ \\ ______ o __ / \\ \\ __ / \\ ____ \\ ______ / Deployment \u00b6 Let's create a sensor with a K8s deployment as the trigger. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/04-standard-k8s-resources/sensor-deployment.yaml Use either Curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see a deployment being created. Get the corresponding pod. kubectl -n argo-events get deployments After the pod has completed, inspect its logs; you will see something similar to the below. _________________________________________ / { \"context\" :{ \"type\" : \"webhook\" , \"specVersi \\ | on\" : \"0.3\" , \"source\" : \"webhook\" , \"e | | ventID\" : \"30306463666539362d346666642d34 | | 3336332d383861312d336538363333613564313 | | 932\" , \"time\" : \"2020-01-11T21:23:07.682961 | | Z\" , \"dataContentType\" : \"application/json\" | | , \"subject\" : \"example\" }, \"data\" : \"eyJoZWFkZ | | XIiOnsiQWNjZXB0IjpbIiovKiJdLCJDb250ZW50 | | LUxlbmd0aCI6WyIxOSJdLCJDb250ZW50LVR5cGU | | iOlsiYXBwbGljYXRpb24vanNvbiJdLCJVc2VyLU | | FnZW50IjpbImN1cmwvNy41NC4wIl19LCJib2R5I | \\ jp7Im1lc3NhZ2UiOiJoZXkhISJ9fQ==\" } / ----------------------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## == = / \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" ___ / == = ~~~ { ~~ ~~~~ ~~~ ~~~~ ~~ ~ / == = - ~~~ \\ ______ o __ / \\ \\ __ / \\ ____ \\ ______ /","title":"Trigger Standard K8s Resources"},{"location":"tutorials/04-standard-k8s-resources/#trigger-standard-k8s-resources","text":"In the previous sections, you saw how to trigger Argo workflows. In this tutorial, you will see how to trigger a Pod and a Deployment. Note: You can trigger any standard Kubernetes object. Having the ability to trigger standard Kubernetes resources is quite powerful as it provides an avenue to set up pipelines for existing workloads.","title":"Trigger Standard K8s Resources"},{"location":"tutorials/04-standard-k8s-resources/#prerequisites","text":"Make sure that the service account used by the Sensor has the necessary permissions to create the Kubernetes resource of your choice. We use k8s-resource-sa for the examples below; it should be bound to a Role like the following. apiVersion : rbac.authorization.k8s.io/v1 kind : Role metadata : name : create-deploy-pod-role rules : - apiGroups : - \"\" resources : - pods verbs : - create - apiGroups : - apps resources : - deployments verbs : - create The Webhook event-source is already set up.","title":"Prerequisites"},
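The Role above only takes effect once it is bound to the k8s-resource-sa service account. A minimal RoleBinding might look like the following; the names match the prerequisites above, but this exact manifest is not part of the tutorial files.

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: create-deploy-pod-role-binding
  namespace: argo-events
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: create-deploy-pod-role
subjects:
  - kind: ServiceAccount
    name: k8s-resource-sa
    namespace: argo-events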
{"location":"tutorials/04-standard-k8s-resources/#pod","text":"Create a sensor with a K8s trigger. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/04-standard-k8s-resources/sensor-pod.yaml Use either Curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see a pod being created. kubectl -n argo-events get po After the pod has completed, inspect its logs; you will see something similar to the below. _________________________________________ / { \"context\" :{ \"type\" : \"webhook\" , \"specVersi \\ | on\" : \"0.3\" , \"source\" : \"webhook\" , \"e | | ventID\" : \"30306463666539362d346666642d34 | | 3336332d383861312d336538363333613564313 | | 932\" , \"time\" : \"2020-01-11T21:23:07.682961 | | Z\" , \"dataContentType\" : \"application/json\" | | , \"subject\" : \"example\" }, \"data\" : \"eyJoZWFkZ | | XIiOnsiQWNjZXB0IjpbIiovKiJdLCJDb250ZW50 | | LUxlbmd0aCI6WyIxOSJdLCJDb250ZW50LVR5cGU | | iOlsiYXBwbGljYXRpb24vanNvbiJdLCJVc2VyLU | | FnZW50IjpbImN1cmwvNy41NC4wIl19LCJib2R5I | \\ jp7Im1lc3NhZ2UiOiJoZXkhISJ9fQ==\" } / ----------------------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## == = / \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" ___ / == = ~~~ { ~~ ~~~~ ~~~ ~~~~ ~~ ~ / == = - ~~~ \\ ______ o __ / \\ \\ __ / \\ ____ \\ ______ /","title":"Pod"},{"location":"tutorials/04-standard-k8s-resources/#deployment","text":"Let's create a sensor with a K8s deployment as the trigger. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/04-standard-k8s-resources/sensor-deployment.yaml Use either Curl or Postman to send a POST request to http://localhost:12000/example . curl -d '{\"message\":\"ok\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example Now, you should see a deployment being created. Get the corresponding pod. kubectl -n argo-events get deployments After the pod has completed, inspect its logs; you will see something similar to the below. _________________________________________ / { \"context\" :{ \"type\" : \"webhook\" , \"specVersi \\ | on\" : \"0.3\" , \"source\" : \"webhook\" , \"e | | ventID\" : \"30306463666539362d346666642d34 | | 3336332d383861312d336538363333613564313 | | 932\" , \"time\" : \"2020-01-11T21:23:07.682961 | | Z\" , \"dataContentType\" : \"application/json\" | | , \"subject\" : \"example\" }, \"data\" : \"eyJoZWFkZ | | XIiOnsiQWNjZXB0IjpbIiovKiJdLCJDb250ZW50 | | LUxlbmd0aCI6WyIxOSJdLCJDb250ZW50LVR5cGU | | iOlsiYXBwbGljYXRpb24vanNvbiJdLCJVc2VyLU | | FnZW50IjpbImN1cmwvNy41NC4wIl19LCJib2R5I | \\ jp7Im1lc3NhZ2UiOiJoZXkhISJ9fQ==\" } / ----------------------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## == = / \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" ___ / == = ~~~ { ~~ ~~~~ ~~~ ~~~~ ~~ ~ / == = - ~~~ \\ ______ o __ / \\ \\ __ / \\ ____ \\ ______ /","title":"Deployment"},{"location":"tutorials/05-trigger-custom-resources/","text":"Trigger Custom Resources \u00b6 Take a look at Build Your Own Trigger to customize the sensor.","title":"Trigger Custom Resources"},{"location":"tutorials/05-trigger-custom-resources/#trigger-custom-resources","text":"Take a look at Build Your Own Trigger to customize the sensor.","title":"Trigger Custom Resources"},
{"location":"tutorials/06-trigger-conditions/","text":"Trigger Conditions \u00b6 In the previous sections, you have been dealing with just a single dependency. But, in many cases, you want to wait for multiple events to occur and then trigger a resource, which means you need a mechanism to determine which triggers to execute based on a set of different event dependencies. This mechanism is supported through conditions . Note : Whenever you define multiple dependencies in a sensor, the sensor applies an AND operation, meaning it will wait for all dependencies to resolve before it executes triggers. conditions can modify that behavior. Prerequisite \u00b6 A Minio server must be set up in the argo-events namespace with a bucket called test , and it should be available at minio-service.argo-events:9000 . Conditions \u00b6 Consider a scenario where you have a Webhook and a Minio event-source, and you want to trigger an Argo workflow if the sensor receives an event from the Webhook event-source, but another workflow if it receives an event from the Minio event-source. Create the webhook event-source. The event-source listens to HTTP requests on port 12000 . kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/webhook-event-source.yaml Create the minio event-source. The event-source listens to events of type PUT and DELETE for objects in the bucket test . kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/minio-event-source.yaml Make sure there are no errors in any of the event-sources. Let's create the sensor. If you take a closer look at the trigger templates, you will notice that each contains a field named conditions , which is a boolean expression that contains dependency names. As soon as the expression resolves to true, the corresponding trigger will be executed. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/sensor-01.yaml Send an HTTP request to the Webhook event-source. curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice an Argo workflow with a name like group-1-xxxx is created with the following output: __________________________ < this is my first webhook > -------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Now, let's generate a Minio event so that we can run the group-2-xxxx workflow. Drop a file onto the test bucket. The workflow that gets created will print the name of the bucket as follows: ______ < test > ------ \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Great!! You have now learned how to use conditions . Let's update the sensor with a trigger that waits for both dependencies to resolve. This is the normal sensor behavior if conditions is not defined. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/sensor-02.yaml Send an HTTP request and drop a file onto the Minio bucket as done above. You should get the following output: _______________________________ < this is my first webhook test > ------------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/","title":"Trigger Conditions"},
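In a sensor like sensor-01.yaml, the two trigger templates carry different conditions expressions. A hedged sketch, where the dependency names webhook-dep and minio-dep and the template names are assumptions for illustration:

triggers:
  - template:
      conditions: webhook-dep # fire only when the webhook dependency resolves
      name: group-1
      k8s: {} # Argo Workflow trigger omitted for brevity
  - template:
      conditions: minio-dep   # fire only when the Minio dependency resolves
      name: group-2
      k8s: {} # Argo Workflow trigger omitted for brevity

Boolean operators can also be combined, so conditions: "webhook-dep && minio-dep" would reproduce the default wait-for-all behavior explicitly.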
{"location":"tutorials/06-trigger-conditions/#trigger-conditions","text":"In the previous sections, you have been dealing with just a single dependency. But, in many cases, you want to wait for multiple events to occur and then trigger a resource, which means you need a mechanism to determine which triggers to execute based on a set of different event dependencies. This mechanism is supported through conditions . Note : Whenever you define multiple dependencies in a sensor, the sensor applies an AND operation, meaning it will wait for all dependencies to resolve before it executes triggers. conditions can modify that behavior.","title":"Trigger Conditions"},{"location":"tutorials/06-trigger-conditions/#prerequisite","text":"A Minio server must be set up in the argo-events namespace with a bucket called test , and it should be available at minio-service.argo-events:9000 .","title":"Prerequisite"},{"location":"tutorials/06-trigger-conditions/#conditions","text":"Consider a scenario where you have a Webhook and a Minio event-source, and you want to trigger an Argo workflow if the sensor receives an event from the Webhook event-source, but another workflow if it receives an event from the Minio event-source. Create the webhook event-source. The event-source listens to HTTP requests on port 12000 . kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/webhook-event-source.yaml Create the minio event-source. The event-source listens to events of type PUT and DELETE for objects in the bucket test . kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/minio-event-source.yaml Make sure there are no errors in any of the event-sources. Let's create the sensor. If you take a closer look at the trigger templates, you will notice that each contains a field named conditions , which is a boolean expression that contains dependency names. As soon as the expression resolves to true, the corresponding trigger will be executed. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/sensor-01.yaml Send an HTTP request to the Webhook event-source. curl -d '{\"message\":\"this is my first webhook\"}' -H \"Content-Type: application/json\" -X POST http://localhost:12000/example You will notice an Argo workflow with a name like group-1-xxxx is created with the following output: __________________________ < this is my first webhook > -------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Now, let's generate a Minio event so that we can run the group-2-xxxx workflow. Drop a file onto the test bucket. The workflow that gets created will print the name of the bucket as follows: ______ < test > ------ \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/ Great!! You have now learned how to use conditions . Let's update the sensor with a trigger that waits for both dependencies to resolve. This is the normal sensor behavior if conditions is not defined. kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/sensor-02.yaml Send an HTTP request and drop a file onto the Minio bucket as done above. You should get the following output: _______________________________ < this is my first webhook test > ------------------------------- \\ \\ \\ ## . ## ## ## == ## ## ## ## === /\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"___/ === ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ \\______ o __/ \\ \\ __/ \\____\\______/","title":"Conditions"},
{"location":"tutorials/07-policy/","text":"Policy \u00b6 A policy for a trigger determines whether the trigger resulted in success or failure. Currently, Argo Events supports 2 types of policies: Policy based on the K8s resource labels. Policy based on the response status for triggers like HTTP request, AWS Lambda, etc. Resource Labels Policy \u00b6 This type of policy determines whether the trigger completed successfully based on the labels set on the trigger resource. Consider a sensor which has an Argo workflow as the trigger. When an Argo workflow completes successfully, the workflow controller sets a label on the resource: workflows.argoproj.io/completed: 'true' . So, in order for the sensor to determine whether the trigger workflow completed successfully, you just need to set the policy labels to workflows.argoproj.io/completed: 'true' under the trigger template. In addition to labels, you can also define a backoff and an option to error out if the sensor is unable to determine the status of the trigger after the backoff completes. Check out the specification of the resource labels policy here . Status Policy \u00b6 For triggers like HTTP request or AWS Lambda, you can apply the Status Policy to determine the trigger status. The Status Policy supports a list of expected response statuses. If the status of the HTTP request or Lambda is within the statuses defined in the policy, then the trigger is considered successful. The complete specification is available here .","title":"Policy"},{"location":"tutorials/07-policy/#policy","text":"A policy for a trigger determines whether the trigger resulted in success or failure. Currently, Argo Events supports 2 types of policies: Policy based on the K8s resource labels. Policy based on the response status for triggers like HTTP request, AWS Lambda, etc.","title":"Policy"},{"location":"tutorials/07-policy/#resource-labels-policy","text":"This type of policy determines whether the trigger completed successfully based on the labels set on the trigger resource. Consider a sensor which has an Argo workflow as the trigger. When an Argo workflow completes successfully, the workflow controller sets a label on the resource: workflows.argoproj.io/completed: 'true' . So, in order for the sensor to determine whether the trigger workflow completed successfully, you just need to set the policy labels to workflows.argoproj.io/completed: 'true' under the trigger template. In addition to labels, you can also define a backoff and an option to error out if the sensor is unable to determine the status of the trigger after the backoff completes. Check out the specification of the resource labels policy here .","title":"Resource Labels Policy"},{"location":"tutorials/07-policy/#status-policy","text":"For triggers like HTTP request or AWS Lambda, you can apply the Status Policy to determine the trigger status. The Status Policy supports a list of expected response statuses. If the status of the HTTP request or Lambda is within the statuses defined in the policy, then the trigger is considered successful. The complete specification is available here .","title":"Status Policy"}]}
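Hedged sketches of the two policy shapes on a trigger follow; the field names are taken from the Sensor policy specification, the trigger names are placeholders, and the backoff settings are omitted to keep the examples minimal.

# Resource labels policy on a K8s trigger
triggers:
  - template:
      name: workflow-trigger # placeholder name
      k8s: {} # trigger definition omitted for brevity
    policy:
      k8s:
        labels:
          workflows.argoproj.io/completed: "true"
        errorOnBackoffTimeout: true # fail the trigger if the label never appears

# Status policy on an HTTP trigger
triggers:
  - template:
      name: http-trigger # placeholder name
      http: {} # trigger definition omitted for brevity
    policy:
      status:
        allow:
          - 200
          - 201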
\ No newline at end of file
diff --git a/sitemap.xml b/sitemap.xml
index 0edca7388c..6c70646cab 100644
--- a/sitemap.xml
+++ b/sitemap.xml
@@ -2,442 +2,447 @@
 None
-2024-09-20
+2024-09-23
 daily
[the same 2024-09-20 to 2024-09-23 lastmod bump repeats for every remaining sitemap entry, plus one new entry dated 2024-09-23 for the added APIs page]