diff --git a/app_builder_import_file/sigsci_TA_for_splunk-1_0_38_export.tgz b/app_builder_import_file/sigsci_TA_for_splunk-1_0_38_export.tgz
index e614ece..5190f29 100644
Binary files a/app_builder_import_file/sigsci_TA_for_splunk-1_0_38_export.tgz and b/app_builder_import_file/sigsci_TA_for_splunk-1_0_38_export.tgz differ
diff --git a/sigsci_TA_for_splunk-1.0.38.tgz b/sigsci_TA_for_splunk-1.0.38.tgz
index e614ece..5190f29 100644
Binary files a/sigsci_TA_for_splunk-1.0.38.tgz and b/sigsci_TA_for_splunk-1.0.38.tgz differ
diff --git a/sigsci_TA_for_splunk/README/inputs.conf.spec b/sigsci_TA_for_splunk/README/inputs.conf.spec
index ff4d017..ae675ea 100644
--- a/sigsci_TA_for_splunk/README/inputs.conf.spec
+++ b/sigsci_TA_for_splunk/README/inputs.conf.spec
@@ -1,21 +1,21 @@
-[SigsciRequests://]
-site_api_name = This is the API Name of the site to pull request data from. This should not be a URL.
-request_limit = The amount of request objects returned in the array. Default: 100. Max:1000
-disable_catchup = Disables catch-up behavior. Request feed will always be ingested from now and the delta (and offset). We recommend keeping this as checked for request feeds with large amounts of requests.
-twenty_hour_catchup = In the event the last time stored is >24hours the TA will try can try and catch-up from exactly 24 hours ago, otherwise resets to now - delta. Disable catchup must be false in order to work.
-attack_and_anomaly_signals_only = Only retrieves requests that contain attack or anomaly signals. Please evaluate your signal configuration if there are overly inclusive signals creating excessive requests.
-request_timeout = Configures Request Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.
-read_timeout = Configures Read Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.
-
 [SigsciEvent://]
 site_api_name = This is the Site API Name. It should not be a URL.
-disable_catchup = Time is always set based from now - delta (Interval). Recommended to be True. Default: True.
-twenty_hour_catchup = If last stored timestamp was over 24 hours, resets to exactly 24 hours ago instead to meet API limitations.
+disable_catchup = Disables catch-up behavior. Events will always be ingested from now minus the delta (including an offset for the requests feed). Recommended to be left True. Default: True.
+twenty_hour_catchup = In the event the last time stored is >24 hours the TA will try to catch up from exactly 24 hours ago, otherwise resets to now minus the delta. 'Disable Catchup' must be False in order to work.
 request_timeout = Configures Request Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.
 read_timeout = Configured Read Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.
 
 [SigsciActivity://]
-disable_catchup = 
-twenty_hour_catchup = 
+disable_catchup = Disables catch-up behavior. Events will always be ingested from now minus the delta (including an offset for the requests feed). Recommended to be left True. Default: True.
+twenty_hour_catchup = In the event the last time stored is >24 hours the TA will try to catch up from exactly 24 hours ago, otherwise resets to now minus the delta. 'Disable Catchup' must be False in order to work.
+request_timeout = Configures Request Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.
+read_timeout = Configures Read Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.
+
+[SigsciRequests://]
+site_api_name = This is the API Name of the site to pull request data from. This should not be a URL.
+request_limit = The number of request objects returned in the array. Default: 100. Max: 1000
+disable_catchup = Disables catch-up behavior. Events will always be ingested from now minus the delta (including an offset for the requests feed). Recommended to be left True. Default: True.
+twenty_hour_catchup = In the event the last time stored is >24 hours the TA will try to catch up from exactly 24 hours ago, otherwise resets to now minus the delta. 'Disable Catchup' must be False in order to work.
+attack_and_anomaly_signals_only = Only retrieves requests that contain attack or anomaly signals. Please evaluate your signal configuration if there are overly inclusive signals creating excessive requests.
 request_timeout = Configures Request Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.
 read_timeout = Configures Read Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.
\ No newline at end of file
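The two catch-up flags documented above interact, and the one-line descriptions are easy to misread. The sketch below restates the documented window selection in Python; the function name, the epoch-second checkpoint, and the tuple return are illustrative assumptions rather than the TA's actual `get_until_time` signature, and `validate_input` (later in this diff) additionally rejects enabling both flags at once.

```python
import time

# Illustrative sketch only; the real logic lives in sigsci_helper.get_until_time.
# last_until is the checkpointed end of the previous run, in epoch seconds.
def next_window(last_until, delta, disable_catchup, twenty_hour_catchup, offset=300):
    until = int(time.time()) - offset        # requests feed trails "now" by an offset
    if disable_catchup or last_until is None:
        return until - delta, until          # always ingest now minus the delta
    if until - last_until > 24 * 3600:       # the API feed only reaches back 24 hours
        if twenty_hour_catchup:
            return until - 24 * 3600, until  # catch up from exactly 24 hours ago
        return until - delta, until          # otherwise reset to now minus the delta
    return last_until, until                 # normal catch-up from the checkpoint
```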
diff --git a/sigsci_TA_for_splunk/appserver/static/js/build/globalConfig.json b/sigsci_TA_for_splunk/appserver/static/js/build/globalConfig.json
index e18b04a..3199a1b 100644
--- a/sigsci_TA_for_splunk/appserver/static/js/build/globalConfig.json
+++ b/sigsci_TA_for_splunk/appserver/static/js/build/globalConfig.json
@@ -247,10 +247,6 @@
                 "field": "site_api_name",
                 "label": "Site API Name"
             },
-            {
-                "field": "request_limit",
-                "label": "Request Limit"
-            },
             {
                 "field": "disable_catchup",
                 "label": "Disable Catchup"
@@ -259,17 +255,21 @@
                 "field": "twenty_hour_catchup",
                 "label": "24 Hour Catchup"
             },
-            {
-                "field": "attack_and_anomaly_signals_only",
-                "label": "Attack & Anomaly Signals Only"
-            },
             {
                 "field": "request_timeout",
                 "label": "Request Timeout"
             },
             {
                 "field": "read_timeout",
-                "label": "Read Timeout"
+                "label": "read_timeout"
+            },
+            {
+                "field": "request_limit",
+                "label": "Request Limit"
+            },
+            {
+                "field": "attack_and_anomaly_signals_only",
+                "label": "Attack & Anomaly Signals Only"
             }
         ],
         "actions": [
@@ -281,8 +281,8 @@
     },
     "services": [
         {
-            "name": "SigsciRequests",
-            "title": "SigsciRequests",
+            "name": "SigsciEvent",
+            "title": "SigsciEvent",
             "entity": [
                 {
                     "field": "name",
@@ -341,25 +341,9 @@
                 {
                     "field": "site_api_name",
                     "label": "Site API Name",
-                    "help": "This is the API Name of the site to pull request data from. This should not be a URL.",
-                    "required": true,
-                    "type": "text",
-                    "validators": [
-                        {
-                            "type": "string",
-                            "minLength": 0,
-                            "maxLength": 8192,
-                            "errorMsg": "Max length of text input is 8192"
-                        }
-                    ]
-                },
-                {
-                    "field": "request_limit",
-                    "label": "Request Limit",
-                    "help": "The amount of request objects returned in the array. Default: 100. Max:1000",
+                    "help": "This is the Site API Name. It should not be a URL.",
                     "required": true,
                     "type": "text",
-                    "defaultValue": "1000",
                     "validators": [
                         {
                             "type": "string",
@@ -372,7 +356,7 @@
                 {
                     "field": "disable_catchup",
                     "label": "Disable Catchup",
-                    "help": "Disables catch-up behavior. Request feed will always be ingested from now and the delta (and offset). We recommend keeping this as checked for request feeds with large amounts of requests. ",
+                    "help": "Disables catch-up behavior. Events will always be ingested from now minus the delta (including an offset for the requests feed). Recommended to be left True. Default: True.",
                     "required": false,
                     "type": "checkbox",
                     "defaultValue": true
@@ -380,14 +364,7 @@
                 {
                     "field": "twenty_hour_catchup",
                     "label": "24 Hour Catchup",
-                    "help": "In the event the last time stored is >24hours the TA will try can try and catch-up from exactly 24 hours ago, otherwise resets to now - delta. Disable catchup must be false in order to work.",
-                    "required": false,
-                    "type": "checkbox"
-                },
-                {
-                    "field": "attack_and_anomaly_signals_only",
-                    "label": "Attack & Anomaly Signals Only",
-                    "help": "Only retrieves requests that contain attack or anomaly signals. Please evaluate your signal configuration if there are overly inclusive signals creating excessive requests.",
+                    "help": "In the event the last time stored is >24 hours the TA will try to catch up from exactly 24 hours ago, otherwise resets to now minus the delta. 'Disable Catchup' must be False in order to work.",
                     "required": false,
                     "type": "checkbox"
                 },
@@ -407,13 +384,13 @@
                     ],
                     "options": {
-                        "placeholder": "Request Timeout"
+                        "placeholder": "60"
                     }
                 },
                 {
                     "field": "read_timeout",
-                    "label": "Read Timeout",
-                    "help": "Configures Read Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.",
+                    "label": "read_timeout",
+                    "help": "Configured Read Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.",
                     "required": true,
                     "type": "text",
                     "defaultValue": "60",
@@ -424,13 +401,16 @@
                             "maxLength": 8192,
                             "errorMsg": "Max length of text input is 8192"
                         }
-                    ]
+                    ],
+                    "options": {
+                        "placeholder": "60"
+                    }
                 }
             ]
         },
         {
-            "name": "SigsciEvent",
-            "title": "SigsciEvent",
+            "name": "SigsciActivity",
+            "title": "SigsciActivity",
             "entity": [
                 {
                     "field": "name",
@@ -486,25 +466,10 @@
                     }
                 ]
             },
-            {
-                "field": "site_api_name",
-                "label": "Site API Name",
-                "help": "This is the Site API Name. It should not be a URL.",
-                "required": true,
-                "type": "text",
-                "validators": [
-                    {
-                        "type": "string",
-                        "minLength": 0,
-                        "maxLength": 8192,
-                        "errorMsg": "Max length of text input is 8192"
-                    }
-                ]
-            },
             {
                 "field": "disable_catchup",
                 "label": "Disable Catchup",
-                "help": "Time is always set based from now - delta (Interval). Recommended to be True. Default: True.",
+                "help": "Disables catch-up behavior. Events will always be ingested from now minus the delta (including an offset for the requests feed). Recommended to be left True. Default: True.",
                 "required": false,
                 "type": "checkbox",
                 "defaultValue": true
@@ -512,7 +477,7 @@
             {
                 "field": "twenty_hour_catchup",
                 "label": "24 Hour Catchup",
-                "help": "If last stored timestamp was over 24 hours, resets to exactly 24 hours ago instead to meet API limitations. ",
+                "help": "In the event the last time stored is >24 hours the TA will try to catch up from exactly 24 hours ago, otherwise resets to now minus the delta. 'Disable Catchup' must be False in order to work.",
                 "required": false,
                 "type": "checkbox"
             },
@@ -537,8 +502,8 @@
             },
             {
                 "field": "read_timeout",
-                "label": "read_timeout",
-                "help": "Configured Read Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.",
+                "label": "Read Timeout",
+                "help": "Configures Read Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.",
                 "required": true,
                 "type": "text",
                 "defaultValue": "60",
@@ -557,8 +522,8 @@
             ]
         },
         {
-            "name": "SigsciActivity",
-            "title": "SigsciActivity",
+            "name": "SigsciRequests",
+            "title": "SigsciRequests",
             "entity": [
                 {
                     "field": "name",
@@ -614,10 +579,41 @@
                     }
                 ]
             },
+            {
+                "field": "site_api_name",
+                "label": "Site API Name",
+                "help": "This is the API Name of the site to pull request data from. This should not be a URL.",
+                "required": true,
+                "type": "text",
+                "validators": [
+                    {
+                        "type": "string",
+                        "minLength": 0,
+                        "maxLength": 8192,
+                        "errorMsg": "Max length of text input is 8192"
+                    }
+                ]
+            },
+            {
+                "field": "request_limit",
+                "label": "Request Limit",
+                "help": "The number of request objects returned in the array. Default: 100. Max: 1000",
+                "required": true,
+                "type": "text",
+                "defaultValue": "1000",
+                "validators": [
+                    {
+                        "type": "string",
+                        "minLength": 0,
+                        "maxLength": 8192,
+                        "errorMsg": "Max length of text input is 8192"
+                    }
+                ]
+            },
             {
                 "field": "disable_catchup",
                 "label": "Disable Catchup",
-                "help": "",
+                "help": "Disables catch-up behavior. Events will always be ingested from now minus the delta (including an offset for the requests feed). Recommended to be left True. Default: True.",
                 "required": false,
                 "type": "checkbox",
                 "defaultValue": true
@@ -625,7 +621,14 @@
             {
                 "field": "twenty_hour_catchup",
                 "label": "24 Hour Catchup",
-                "help": "",
+                "help": "In the event the last time stored is >24 hours the TA will try to catch up from exactly 24 hours ago, otherwise resets to now minus the delta. 'Disable Catchup' must be False in order to work.",
+                "required": false,
+                "type": "checkbox"
+            },
+            {
+                "field": "attack_and_anomaly_signals_only",
+                "label": "Attack & Anomaly Signals Only",
+                "help": "Only retrieves requests that contain attack or anomaly signals. Please evaluate your signal configuration if there are overly inclusive signals creating excessive requests.",
                 "required": false,
                 "type": "checkbox"
             },
@@ -645,7 +648,7 @@
                 ],
                 "options": {
-                    "placeholder": "60"
+                    "placeholder": "Request Timeout"
                 }
             },
             {
@@ -662,10 +665,7 @@
                         "maxLength": 8192,
                         "errorMsg": "Max length of text input is 8192"
                     }
-                ],
-                "options": {
-                    "placeholder": "60"
-                }
+                ]
             }
         ]
     }
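The SigsciRequests fields restored above are consumed by collector code that appears verbatim later in this diff: the request feed is pulled page by page, and the Attack & Anomaly Signals Only checkbox narrows it by appending a `tags` filter to the feed URL. A condensed sketch of that URL assembly follows; the corp, site, and timestamp values are placeholders, and the signal list is heavily abbreviated.

```python
# Condensed from the collect_events code embedded later in this diff.
api_host = "https://dashboard.signalsciences.net"
corp, site = "my_corp", "my_example_site"  # placeholders
request_limit, from_time, until_time = 1000, 1700000000, 1700000300

url = (
    f"{api_host}/api/v0/corps/{corp}/sites/{site}/feed/requests"
    f"?limit={request_limit}&from={from_time}&until={until_time}"
)

attack_and_anomaly_signals_only = True  # the checkbox restored in this service
if attack_and_anomaly_signals_only:
    # Abbreviated here; the TA joins its full attack and anomaly signal lists.
    url = f"{url}&tags=" + ",".join(["SQLI", "XSS", "TRAVERSAL", "BACKDOOR"])
```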
diff --git a/sigsci_TA_for_splunk/bin/SigsciActivity.py b/sigsci_TA_for_splunk/bin/SigsciActivity.py
index 262657c..bf0464b 100644
--- a/sigsci_TA_for_splunk/bin/SigsciActivity.py
+++ b/sigsci_TA_for_splunk/bin/SigsciActivity.py
@@ -47,11 +47,11 @@ def get_scheme(self):
     For other input types, arguments should be get from input_module. Defining new input types could be easier.
     """
     scheme.add_argument(smi.Argument("disable_catchup", title="Disable Catchup",
-                                     description="",
+                                     description="Disables catch-up behavior. Events will always be ingested from now minus the delta (including an offset for the requests feed). Recommended to be left True. Default: True.",
                                      required_on_create=False,
                                      required_on_edit=False))
     scheme.add_argument(smi.Argument("twenty_hour_catchup", title="24 Hour Catchup",
-                                     description="",
+                                     description="In the event the last time stored is >24 hours the TA will try to catch up from exactly 24 hours ago, otherwise resets to now minus the delta. \'Disable Catchup\' must be False in order to work.",
                                      required_on_create=False,
                                      required_on_edit=False))
     scheme.add_argument(smi.Argument("request_timeout", title="Request Timeout",
diff --git a/sigsci_TA_for_splunk/bin/SigsciEvent.py b/sigsci_TA_for_splunk/bin/SigsciEvent.py
index 7ae907e..abfa021 100644
--- a/sigsci_TA_for_splunk/bin/SigsciEvent.py
+++ b/sigsci_TA_for_splunk/bin/SigsciEvent.py
@@ -51,11 +51,11 @@ def get_scheme(self):
                                      required_on_create=True,
                                      required_on_edit=False))
     scheme.add_argument(smi.Argument("disable_catchup", title="Disable Catchup",
-                                     description="Time is always set based from now - delta (Interval). Recommended to be True. Default: True.",
+                                     description="Disables catch-up behavior. Events will always be ingested from now minus the delta (including an offset for the requests feed). Recommended to be left True. Default: True.",
                                      required_on_create=False,
                                      required_on_edit=False))
     scheme.add_argument(smi.Argument("twenty_hour_catchup", title="24 Hour Catchup",
-                                     description="If last stored timestamp was over 24 hours, resets to exactly 24 hours ago instead to meet API limitations.",
+                                     description="In the event the last time stored is >24 hours the TA will try to catch up from exactly 24 hours ago, otherwise resets to now minus the delta. \'Disable Catchup\' must be False in order to work.",
                                      required_on_create=False,
                                      required_on_edit=False))
     scheme.add_argument(smi.Argument("request_timeout", title="Request Timeout",
diff --git a/sigsci_TA_for_splunk/bin/SigsciRequests.py b/sigsci_TA_for_splunk/bin/SigsciRequests.py
index 7a1ad04..05dc101 100644
--- a/sigsci_TA_for_splunk/bin/SigsciRequests.py
+++ b/sigsci_TA_for_splunk/bin/SigsciRequests.py
@@ -55,11 +55,11 @@ def get_scheme(self):
                                      required_on_create=True,
                                      required_on_edit=False))
     scheme.add_argument(smi.Argument("disable_catchup", title="Disable Catchup",
-                                     description="Disables catch-up behavior. Request feed will always be ingested from now and the delta (and offset). We recommend keeping this as checked for request feeds with large amounts of requests.",
+                                     description="Disables catch-up behavior. Events will always be ingested from now minus the delta (including an offset for the requests feed). Recommended to be left True. Default: True.",
                                      required_on_create=False,
                                      required_on_edit=False))
     scheme.add_argument(smi.Argument("twenty_hour_catchup", title="24 Hour Catchup",
-                                     description="In the event the last time stored is >24hours the TA will try can try and catch-up from exactly 24 hours ago, otherwise resets to now - delta. Disable catchup must be false in order to work.",
+                                     description="In the event the last time stored is >24 hours the TA will try to catch up from exactly 24 hours ago, otherwise resets to now minus the delta.
\'Disable Catchup\' must be False in order to work.", required_on_create=False, required_on_edit=False)) scheme.add_argument(smi.Argument("attack_and_anomaly_signals_only", title="Attack & Anomaly Signals Only", diff --git a/sigsci_TA_for_splunk/bin/file.txt b/sigsci_TA_for_splunk/bin/file.txt deleted file mode 100644 index 84218de..0000000 --- a/sigsci_TA_for_splunk/bin/file.txt +++ /dev/null @@ -1,525 +0,0 @@ -APPNETA -PATHVIEW -PATHTEST -CHARGEN -CSCOSLA -CMIP -CRITTER -CTF -DAYTIME -DCP -DISCARD -ECHO -FINGER -ICMP -ICMPV6 -IPERF -IXCHRIOT -NAVERISK -OPALIS -SGMP -SMUX -SNMP -STATSRV -SYSLOG -SYSTAT -TIVOLI -TRIPWIRE -UMA -ZABBIX -3COMTSMX -3PC -914CG -ACAS -ACI -ACTIVDIR -ACTVSYNC -AD_BKUP -AD_DRS -AD_DSAOP -AD_DSROL -AD_FRS -AD_NSP -AD_RSTOR -AD_XDS -AED512 -ALIAS -AMQP -AN -ANET -ANSANTFY -ANSATRDR -APPLE -APPLARP -APPLSHAR -APPLTALK -ARCISDMS -ARIEL -ARIS -ARNS -ARP -ASA -ATMFATE -ATMMPOA -AUDIT -AUDITD -AURORA -AX25 -BACNET -BBNRCC -BGMP -BGP -BH611 -BHEVENT -BHFHS -BHMDS -BJNP -BJNPDISC -BLCKJACK -BNA -BNET -BONJOUR -BRSATMON -CA_CERT -CBLPRTAX -CAILIC -CAP -CNA -CAPWAP -CBT -CCP -CDC -CFTP -CHAOS -CHAP -CSCODRP -CISCOFNA -CSCOGDP -CISCOSYS -CISCOTNA -CL1 -CLRCASE -CLOANTO -CLDRDNSH -CMTP -CODAAUTH -COMPAQ -CMPRSNET -COMSCM -CORBA -CORERJD -COSEM -COTP -COVIA -CPHB -CPNX -CR_LIST -CRTP -CRUDP -CSISGWP -CSNETNS -CVCHOSTD -DASP -DATEXASN -DCAP -DCCP -DCERPC -DCNMEAS -DDP -DDX -DECAUTH -DECDEBUG -DECVMS -DGP -DHCP -DHCPV6 -DIAMETER -DIRECT -DIXIE -DLS -DNACML -DNP3 -DNS -DNSIX -DPSI -DSFGW -DSP -DSP3270 -DSR -DSSETUP -DTAG -DTK -EGP -EIGRP -ELISAVIH -EMBLNDT -EMCON -EMFIS -ENCAP -NTRSTIME -EPMAP -ESRO -ETH -ETHERIP -ENIP -ETOS -FATMEN -FC -FILEMAKR -HBCI -FIRE -FIX -GACP -GPITNP -GENIE -GENRAD -GGP -GIST -GMTP -GOOGZIP -GOOSE -GSE -GSSLIC -GSUITE -GTP_C -GTP_P -GTP_U -HASSLE -HDAP -HEMS -HIP -HMP -HOSTNAME -HPPERF -HSRP -HTTPMGT -HYPERG -IASD -IATP -IBMAPP -IBMOPC -ICAD -ICECAST -ICP -IDENT -IDP -IDPR -IDRP -IEC104 -IFMP -IGMP -IL -IMSP -INBSNESS -INLSP -I2P -IP -IPCP -IPCV -IPFIX -IPLT -IPPC -IPV6 -IPV6CP -IPX -IRTP -IS99 -ISAKMP -ISCSI -ISIGL -ISIS -ISOTSAP -ISOIP -JARGON -JAVARMI -KBLOCK -KERBEROS -KIS -KNETCMP -KRYPTLAN -KVM -LAMAINT -LARP -LCP -LEAF1 -LEAF2 -LEGENT -LEVEL3 -LPR -LINK -LLMNR -LOCUSCON -LOCUSMAP -LSARPC -LYNCCTRL -LYNCSHRE -MGNTALOG -MANET -MASQDIAL -MATIP -MDNS -MERIT -META5 -METAGRAM -MFCOBOL -MFENSP -MFTP -MITSPL -MLDEV -MANMSGSP -MOBILE -MOBILIP -MODBUS -MRTGWARE -MPLSMC -MPLSUC -MPLSINIP -MPP -MPTN -CRS -MSSMSRVR -MSG -MTP -MULTPLEX -MUMPS -MUX -NAMP -NARP -NCED -NCLD -NDMP -NDSAUTH -NTBIOSDG -NTBIOSNS -NTBIOSSS -NETBLT -NETFLOW -NETINFO -NETLOGON -NETSC -NETSCOUT -NETWARE -NIP -NNSP -NPP -NSFNETIG -NSIIOPS -NSRMP -NSS -NSSTP -NTP -NVPII -NXEDIT -NXTSTEP -OCBINDER -OCS -OCSERVER -OCSP -ODMR -ONMUX -ONSHAPE -OPCUA -OPENPORT -OSPF -OSUNMS -PAP -PAWSERV -PDAP -PRSNLINK -PGM -PIM -PIP -PIPE -PKIXTS -PNNI -PPP -PPPDISC -PPPSESS -PPPCOMP -PRINTER -PJL -PRINTSRV -PRM -PROFILE -PROSPERO -PTP -PUBNUB -PUP -PVP -PWDGEN -QBIK -QNX -QUIC -RADIUS -RADIUSAC -RAP -RARP -RESCAP -RIP -RIPNG -RLP -RMT -ROHC -RPC2PMAP -RRP -RSVD -RSVP -RSVPE2EI -RSYNC -RUDP -RVD -SAMR -SAPPROTO -SATEXPAK -SATMON -SCCM -SCCP -SCCSP -SCOI2DLG -SCPS -SCSIST -SCTP -SDRP -AMQPS -SCURSGHT -SEMANTIX -SEND -SET -SGCP -SHRNKRAP -SLVRPLTR -SKIP -SLOW -SMAKYNET -SMARTSDP -SMP -SMPTE -SMSP -SNET -SNP -SNPP -SOFTPC -SPOOLSS -SPRITE -SPS -SRC -SRMP -SRP -SRSSEND -SSCOPMCE -SSDP -ST -STP -STUN -SUNRPC -SUNND -SURMEAS -SVMTP -SVRLOC -SYNOTICS -T120 -TACNEWS -TACACS -TCF -TCP -TCPMUX -TCXFLASH -TCXMEDIA 
-TCXSOUND -TCXUSB -TEREDO -TEXAR -TIMBUKTU -TIME -TLSP -TOBITDAV -TP -TPPP -TRACERUT -TRUNK1 -TRUNK2 -TTP -UAAC -UARPS -UDP -UDPLITE -UIS -ULSTPROC -UDLDM -UNIFY -UPS -UTI -UTMP -VETTCP -VINES -VXLAN -VISA -VMNET -VMPWSCS -VMTP -VMWARE -VMVIEW -VMOTION -VSPHERE -VRRP -VSLMP -WBEXPAK -WBMON -WCCP -WEBFLTR -WEBSOCK -WESP -WHOIS -WINS -WSN -WSP -WYSE_TCX -X11 -X.25 -XBONE -XDMCP -XFER -XNET -XNS -XNSAUTH -XNSCH -XNSTIME -XTP -XYPLEX -Z3950 -ZEBRA -AMMYY -ANYDESK -CTRXCGP -CTRXICA -CTRXIMA -CTRXLIC -CTRXRTMP -CTRXSLGW -WANSCALR -ERPC -GOMREMOT -HPVMM -KTELNET -KWDB -LOGMEIN -NATERMT -PCANYWHR -PCOIP -RDP -RJS -RTELNET -RJE -RLOGIN -RSH -SCCMCTRL -SHOWMYPC -SNAGAS -SOPHRED -SSH -SUTLNT -SUPDUP -TEAMVIEW -TELNET -TN3270 -VNC diff --git a/sigsci_TA_for_splunk/bin/input_module_SigsciActivity.py b/sigsci_TA_for_splunk/bin/input_module_SigsciActivity.py index fd47cdd..992baca 100644 --- a/sigsci_TA_for_splunk/bin/input_module_SigsciActivity.py +++ b/sigsci_TA_for_splunk/bin/input_module_SigsciActivity.py @@ -3,7 +3,7 @@ import json import time from datetime import datetime -from sigsci_helper import get_from_and_until_times, Config, get_results, get_until_time +from sigsci_helper import get_from_and_until_times, Config, get_results, get_until_time, validate_timeouts """ IMPORTANT @@ -18,29 +18,10 @@ def validate_input(helper, definition): - # Read Timeout passed to send_http_request. Type: float. - # https://docs.splunk.com/Documentation/AddonBuilder/4.1.4/UserGuide/PythonHelperFunctions - # We do this per input module as splunk provides no way to validate global configuration arguments :') request_timeout = definition.parameters.get("request_timeout", None) - if request_timeout is None: - raise ValueError("Request timeout configuration is missing") - try: - request_timeout = float(request_timeout) - except ValueError: - raise ValueError(f"Invalid request timeout value: {request_timeout}") - if request_timeout > 300.0 or request_timeout <= 0: - raise ValueError(f"Request timeout must be between 0 and 300 seconds, got {request_timeout}") - - # Read Timeout passed to send_http_request. Type: float. read_timeout = definition.parameters.get("read_timeout", None) - if read_timeout is None: - raise ValueError("Read timeout configuration is missing") - try: - read_timeout = float(read_timeout) - except ValueError: - raise ValueError(f"Invalid read timeout value: {read_timeout}") - if read_timeout > 300.0 or read_timeout <= 0: - raise ValueError(f"Read timeout must be between 0 and 300 seconds, got {read_timeout}") + validate_timeouts(request_timeout, read_timeout) + # Catchup Opts twenty_hour_catchup = definition.parameters.get('twenty_hour_catchup', None) disable_catchup = definition.parameters.get('disable_catchup', None) diff --git a/sigsci_TA_for_splunk/bin/input_module_SigsciEvent.py b/sigsci_TA_for_splunk/bin/input_module_SigsciEvent.py index 5d0c57b..df85116 100644 --- a/sigsci_TA_for_splunk/bin/input_module_SigsciEvent.py +++ b/sigsci_TA_for_splunk/bin/input_module_SigsciEvent.py @@ -4,7 +4,7 @@ import json import time from datetime import datetime -from sigsci_helper import get_from_and_until_times, Config, get_results, get_until_time +from sigsci_helper import get_from_and_until_times, Config, get_results, get_until_time, validate_timeouts """ IMPORTANT @@ -19,29 +19,9 @@ def validate_input(helper, definition): - # Read Timeout passed to send_http_request. Type: float. 
- # https://docs.splunk.com/Documentation/AddonBuilder/4.1.4/UserGuide/PythonHelperFunctions - # We do this per input module as splunk provides no way to validate global configuration arguments :') request_timeout = definition.parameters.get("request_timeout", None) - if request_timeout is None: - raise ValueError("Request timeout configuration is missing") - try: - request_timeout = float(request_timeout) - except ValueError: - raise ValueError(f"Invalid request timeout value: {request_timeout}") - if request_timeout > 300.0 or request_timeout <= 0: - raise ValueError(f"Request timeout must be between 0 and 300 seconds, got {request_timeout}") - - # Read Timeout passed to send_http_request. Type: float. read_timeout = definition.parameters.get("read_timeout", None) - if read_timeout is None: - raise ValueError("Read timeout configuration is missing") - try: - read_timeout = float(read_timeout) - except ValueError: - raise ValueError(f"Invalid read timeout value: {read_timeout}") - if read_timeout > 300.0 or read_timeout <= 0: - raise ValueError(f"Read timeout must be between 0 and 300 seconds, got {read_timeout}") + validate_timeouts(request_timeout, read_timeout) site_name = definition.parameters.get("site_api_name", None) if site_name is None or site_name == "": diff --git a/sigsci_TA_for_splunk/bin/input_module_SigsciRequests.py b/sigsci_TA_for_splunk/bin/input_module_SigsciRequests.py index 9573597..7ca9775 100644 --- a/sigsci_TA_for_splunk/bin/input_module_SigsciRequests.py +++ b/sigsci_TA_for_splunk/bin/input_module_SigsciRequests.py @@ -2,7 +2,7 @@ from timeit import default_timer as timer import time from datetime import datetime, timezone, timedelta -from sigsci_helper import get_from_and_until_times, Config, get_results, get_until_time +from sigsci_helper import get_from_and_until_times, Config, get_results, get_until_time, validate_timeouts """ IMPORTANT @@ -11,7 +11,6 @@ This file is generated only once when creating the modular input. """ - # def use_single_instance_mode(): # return True @@ -28,25 +27,8 @@ def validate_input(helper,definition): # https://docs.splunk.com/Documentation/AddonBuilder/4.1.4/UserGuide/PythonHelperFunctions # We do this per input module as splunk provides no way to validate global configuration arguments :') request_timeout = definition.parameters.get("request_timeout", None) - if request_timeout is None: - raise ValueError("Request timeout configuration is missing") - try: - request_timeout = float(request_timeout) - except ValueError: - raise ValueError(f"Invalid request timeout value: {request_timeout}") - if request_timeout > 300.0 or request_timeout <= 0: - raise ValueError(f"Request timeout must be between 0 and 300 seconds, got {request_timeout}") - - # Read Timeout passed to send_http_request. Type: float. 
     read_timeout = definition.parameters.get("read_timeout", None)
-    if read_timeout is None:
-        raise ValueError("Read timeout configuration is missing")
-    try:
-        read_timeout = float(read_timeout)
-    except ValueError:
-        raise ValueError(f"Invalid read timeout value: {read_timeout}")
-    if read_timeout > 300.0 or read_timeout <= 0:
-        raise ValueError(f"Read timeout must be between 0 and 300 seconds, got {read_timeout}")
+    validate_timeouts(request_timeout, read_timeout)
 
     twenty_hour_catchup = definition.parameters.get('twenty_hour_catchup', None)
     disable_catchup = definition.parameters.get('disable_catchup', None)
diff --git a/sigsci_TA_for_splunk/bin/sigsci_helper.py b/sigsci_TA_for_splunk/bin/sigsci_helper.py
index 2b700a0..d96a715 100644
--- a/sigsci_TA_for_splunk/bin/sigsci_helper.py
+++ b/sigsci_TA_for_splunk/bin/sigsci_helper.py
@@ -5,6 +5,30 @@
 import time
 import requests
 
+
+def validate_timeouts(request_timeout, read_timeout):
+    # Request Timeout passed to send_http_request. Type: float.
+    # https://docs.splunk.com/Documentation/AddonBuilder/4.1.4/UserGuide/PythonHelperFunctions
+    # We do this per input module as Splunk provides no way to validate global configuration arguments.
+    if request_timeout is None:
+        raise ValueError("Request timeout configuration is missing")
+    try:
+        request_timeout = float(request_timeout)
+    except ValueError:
+        raise ValueError(f"Invalid request timeout value: {request_timeout}")
+    if request_timeout > 300.0 or request_timeout <= 0:
+        raise ValueError(f"Request timeout must be between 0 and 300 seconds, got {request_timeout}")
+
+    # Read Timeout passed to send_http_request. Type: float.
+    if read_timeout is None:
+        raise ValueError("Read timeout configuration is missing")
+    try:
+        read_timeout = float(read_timeout)
+    except ValueError:
+        raise ValueError(f"Invalid read timeout value: {read_timeout}")
+    if read_timeout > 300.0 or read_timeout <= 0:
+        raise ValueError(f"Read timeout must be between 0 and 300 seconds, got {read_timeout}")
+
 def check_response(
     code,
     response_text,
@@ -203,8 +227,8 @@ def get_results(title, helper, config):
             number_requests_per_page = len(response["data"])
         except KeyError:
             number_requests_per_page = 0
-            helper.log_error("Invalid response")
-            exit(1)  # we should probably break this flow.
+            helper.log_error(f"Invalid response: {response_result}")
+            break
 
         helper.log_info(f"Number of {title} for Page: {number_requests_per_page}")
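With the duplicated per-module checks collapsed into `sigsci_helper.validate_timeouts`, all three inputs now share one timeout contract. A quick illustration of what the helper accepts and rejects; values arrive from Splunk as strings, and the tuples below are arbitrary test inputs:

```python
from sigsci_helper import validate_timeouts

validate_timeouts("60", "60")  # ok: both coerce to floats within (0, 300]

# Missing, non-numeric, and out-of-range values each raise ValueError:
for bad_pair in [(None, "60"), ("abc", "60"), ("0", "60"), ("60", "301")]:
    try:
        validate_timeouts(*bad_pair)
    except ValueError as err:
        print(err)
```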
diff --git a/sigsci_TA_for_splunk/default/inputs.conf b/sigsci_TA_for_splunk/default/inputs.conf
index 77e2178..40e6c97 100644
--- a/sigsci_TA_for_splunk/default/inputs.conf
+++ b/sigsci_TA_for_splunk/default/inputs.conf
@@ -1,20 +1,18 @@
-[SigsciRequests]
+[SigsciEvent]
 start_by_shell = false
 python.version = python3
-sourcetype = sigsci-requests
+sourcetype = sigsci-event
 interval = 300
-request_limit = 1000
 disable_catchup = True
 twenty_hour_catchup = False
-attack_and_anomaly_signals_only = False
 request_timeout = 60
 read_timeout = 60
 disabled = 0
 
-[SigsciEvent]
+[SigsciActivity]
 start_by_shell = false
 python.version = python3
-sourcetype = sigsci-event
+sourcetype = sigsci-activity
 interval = 300
 disable_catchup = True
 twenty_hour_catchup = False
@@ -22,13 +20,15 @@
 request_timeout = 60
 read_timeout = 60
 disabled = 0
 
-[SigsciActivity]
+[SigsciRequests]
 start_by_shell = false
 python.version = python3
-sourcetype = sigsci-activity
+sourcetype = sigsci-requests
 interval = 300
+request_limit = 1000
 disable_catchup = True
 twenty_hour_catchup = False
+attack_and_anomaly_signals_only = False
 request_timeout = 60
 read_timeout = 60
 disabled = 0
@@ -51,13 +51,13 @@
 request_timeout = 60
 twenty_hour_catchup = False
 
 [SigsciRequests://Demo_SigsciRequests]
-disable_catchup = true
+disable_catchup = 1
 interval = 300
-read_timeout = 60
+read_timeout = 300
 request_limit = 1000
 request_timeout = 60
 site_api_name = YourSite
 attack_and_anomaly_signals_only = False
-twenty_hour_catchup = False
+twenty_hour_catchup = 0
 disabled = 1
 
diff --git a/sigsci_TA_for_splunk/default/restmap.conf b/sigsci_TA_for_splunk/default/restmap.conf
index 4c335c4..ef46041 100644
--- a/sigsci_TA_for_splunk/default/restmap.conf
+++ b/sigsci_TA_for_splunk/default/restmap.conf
@@ -1,7 +1,7 @@
 [admin:sigsci_TA_for_splunk]
 match = /
-members = sigsci_TA_for_splunk_settings, sigsci_TA_for_splunk_SigsciRequests, sigsci_TA_for_splunk_SigsciEvent, sigsci_TA_for_splunk_SigsciActivity
+members = sigsci_TA_for_splunk_settings, sigsci_TA_for_splunk_SigsciEvent, sigsci_TA_for_splunk_SigsciActivity, sigsci_TA_for_splunk_SigsciRequests
 
 [admin_external:sigsci_TA_for_splunk_settings]
 handlertype = python
@@ -10,23 +10,23 @@
 handlerfile = sigsci_TA_for_splunk_rh_settings.py
 handleractions = edit, list
 handlerpersistentmode = true
 
-[admin_external:sigsci_TA_for_splunk_SigsciRequests]
+[admin_external:sigsci_TA_for_splunk_SigsciEvent]
 handlertype = python
 python.version = python3
-handlerfile = sigsci_TA_for_splunk_rh_SigsciRequests.py
+handlerfile = sigsci_TA_for_splunk_rh_SigsciEvent.py
 handleractions = edit, list, remove, create
 handlerpersistentmode = true
 
-[admin_external:sigsci_TA_for_splunk_SigsciEvent]
+[admin_external:sigsci_TA_for_splunk_SigsciActivity]
 handlertype = python
 python.version = python3
-handlerfile = sigsci_TA_for_splunk_rh_SigsciEvent.py
+handlerfile = sigsci_TA_for_splunk_rh_SigsciActivity.py
 handleractions = edit, list, remove, create
 handlerpersistentmode = true
 
-[admin_external:sigsci_TA_for_splunk_SigsciActivity]
+[admin_external:sigsci_TA_for_splunk_SigsciRequests]
 handlertype = python
 python.version = python3
-handlerfile = sigsci_TA_for_splunk_rh_SigsciActivity.py
+handlerfile = sigsci_TA_for_splunk_rh_SigsciRequests.py
 handleractions = edit, list, remove, create
 handlerpersistentmode = true
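The stanza reordering above leaves the three default stanzas' values untouched; only SigsciRequests carries the two request-specific keys. For reference, a complete requests stanza as an operator might write it in local/inputs.conf, mirroring those defaults (the input name and site are placeholders):

```
[SigsciRequests://prod_requests]
site_api_name = my_example_site
interval = 300
request_limit = 1000
disable_catchup = True
twenty_hour_catchup = False
attack_and_anomaly_signals_only = False
request_timeout = 60
read_timeout = 60
disabled = 0
```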
diff --git a/sigsci_TA_for_splunk/default/web.conf b/sigsci_TA_for_splunk/default/web.conf
index c1a8803..98f630a 100644
--- a/sigsci_TA_for_splunk/default/web.conf
+++ b/sigsci_TA_for_splunk/default/web.conf
@@ -7,14 +7,6 @@
 methods = POST, GET
 
 pattern = sigsci_TA_for_splunk_settings/*
 methods = POST, GET, DELETE
 
-[expose:sigsci_TA_for_splunk_SigsciRequests]
-pattern = sigsci_TA_for_splunk_SigsciRequests
-methods = POST, GET
-
-[expose:sigsci_TA_for_splunk_SigsciRequests_specified]
-pattern = sigsci_TA_for_splunk_SigsciRequests/*
-methods = POST, GET, DELETE
-
 [expose:sigsci_TA_for_splunk_SigsciEvent]
 pattern = sigsci_TA_for_splunk_SigsciEvent
 methods = POST, GET
@@ -31,6 +23,14 @@
 methods = POST, GET
 
 pattern = sigsci_TA_for_splunk_SigsciActivity/*
 methods = POST, GET, DELETE
 
+[expose:sigsci_TA_for_splunk_SigsciRequests]
+pattern = sigsci_TA_for_splunk_SigsciRequests
+methods = POST, GET
+
+[expose:sigsci_TA_for_splunk_SigsciRequests_specified]
+pattern = sigsci_TA_for_splunk_SigsciRequests/*
+methods = POST, GET, DELETE
+
 [expose:_splunkd_data]
 pattern = data/*
 methods = GET
diff --git a/sigsci_TA_for_splunk/sigsci_TA_for_splunk.aob_meta b/sigsci_TA_for_splunk/sigsci_TA_for_splunk.aob_meta
index 89f7938..a650353 100644
--- a/sigsci_TA_for_splunk/sigsci_TA_for_splunk.aob_meta
+++ b/sigsci_TA_for_splunk/sigsci_TA_for_splunk.aob_meta
@@ -1 +1 @@
-{"basic_builder": {"appname": "sigsci_TA_for_splunk", "friendly_name": "Signal Sciences WAF TA", "version": "1.0.38", "author": "Fastly", "description": "For users of Fastly who would like to enrich their Splunk data with information from Fastly. This app with simple configuration enabled the importing of Events, Activity, and raw request information to Splunk.\n\nThis is an open source project, no support provided, public repository is available and installation documentation can be found at https://github.com/fastly/sigsci-splunk-app.
The best way to report issues with the app is to create an Issue on the github page so that it can be tracked.", "theme": "#ea3f23", "large_icon": "iVBORw0KGgoAAAANSUhEUgAAAEgAAABICAYAAABV7bNHAAAAAXNSR0IArs4c6QAAD+5JREFUeF7tXAuMVNUZ/s69d96wCwULGoWGiFofNT6KsVEEhYqiEJAKiq0ixSpLa1EKKqAGBXV5RAlugkYXUFCK5dHEtKXl0QqrrNhoVojBRxsUUEGW3Z2dnZn7ar6zc8bLPnBn791BTU+y2dmz5/7n/P+c/z//4ztXuK7rIteSySRM04QQAvF4HOFwWP6ntrZWDZH9kUhE/m1ZFhoaGuRn9vF/bOl0Gk1NTfKzpmkoLS2FdeQIxJQpEP/+d55WZz6IK6+EePllqLWSRigUQrdu3fLk1HoNw0AikZBrsG0b9fX1+TE9e/bMf25sbEQ2m5V/e/mrq6uDKKaAtNGjoVVVdUYuXz8zfDiwefP/BdSuFIstoKNHj+ZVjNuU25Vax21HdWPzbsdUKoVMJiP7uYW7d+/eSq2i0ShisZjsdxwHcqumUkg8+CD0PXsgPvkE4tixgnaS27cv3D59YF1yCRoXLZIqxbWycZ1UOdXUetnP9XINuq6jpKSklRqyg2pIc0K+OV6pG02DKJaApD06cgTdIhGIu++G9pe/FCQg87e/hV1Whoymwendu3gCamxszO8gGjPVKE1lv9vrV0aYv73jaeT5oxq/QdVIKzRxIvT16wsT0IwZsObNa3NNaqd652hv7rb6vettyXfRjLRa2LFjx9B98uTCBTR9OuwFC0D1ZfvenWJ+BWRNmwZr4cLiC6iYNohCouFzx46FvmlTYSr20EMwZ8/OHxDfKyNdf+gQjJoaGFu3IpROQ9uwAeKzzwoSkHPJJbAvuwx2NAr78ssRGjoU4V69vrunGD3rTFUVIo8/DvHmmzyLIdJpWvOCBNNqMI0/PflwGM4558B6+GG4w4bJ47lLjvkgPekYT8GdO+Fu3gxRWQlx+LA/YXTwafeUU+BOmgQxZAgwdCiO5cKcb0+oYVmIb9qE8J/+1Lxb6uo6yFrAw2IxuIMHI33ttUjfdhuMaNR/LOY4Tn7P03tmAEq/gJ6w8lTpCavGfhXE2pkMslu2IDpzJrS9ewPm1h8559xzYS5eDGPoUGjhsAxWvd42DwvVqJ4qavDyx0C805608fHHCK9YgfDq1QWHDf5YL+DpaBSZ22+HOXEinAsvLF6ooVVWIlJeDrF/fwGrPXlD3dNOg/W738GYOTPv4XtTOCeMxZLJZF7FGNBRvehuMzxQoQaNHRsDTH3xYugVFUBj48njuDMzGwacm2+G9dRTcHv3hmXbeSqKb3aQbxUayf6OnmLawYOIP/AAQn/9KzNlnVnit+IZ68orkVqyBCWXXppfj/+E2eHDKL3sMmj//a9/P+ZbICY3kUD6wAHEcob6hAL6plDDePdddBs/vmg+TbHkR98p+dJLsAYN6nw+CG+/jfj06dDee69Y6y7qPM6PfoSmJ59EaMyY9hNmtbW1eSNNa06DTOOc3r8foenTEdq4saiLLvZk1rBhwLPPQh84UPLNYoPyiZgtbdtINzQgPncuwqtWFXu9J2U+86ab0LhwIdwePTpW1Qjv2IHE2LHf6dOqIEnrOjLl5UhNmtRaQJZl5VWM9Szr4EGUnnVWQfQLGkw/64c/lD/OqafCGj8e1uDBELW1iF9/vSTlbNuG5Omng19UaOVK6B9/DBw6BPHFF116ita/9x4iZ58tzQwbQ5DjQo1YJILwjTdC27q1IJ47NFgIOMznjBkD/YorYJ1/PtKAjJHYetTWwr3zTkAIJJ9+Gna/frKfaRP+iLffhr1jB7S//Q36li1dIijmnJL/+AfsXEqmVVUjtn49otOmsTTaIZ47Osjt3x9N8+cjO3QoSvv2hchVOul/KAH1pE+Sm7c+m80v0luxbWLJqb4e4W3bEHvoIQj6ZUE2w0Dm3nuRmj1bUpUCUqGG09CA2LhxMHbuDG5KTUP21lthLVoEN1caVtu3vXCGk1No3oqKqqp4wwAtmYQ+YwZC69fLmltQjX5R+pVXIPr0kaqWP8Wc5cshZs2CCCqXo+tofOYZWLfdJr8J1VjVIPOMc1gHVwLzBo8s8PH/bCxSqmwhUxEtqxrhTZsQv+8+acMCafE43PnzIX7/++aCJ/NBLgEIf/gDtOeeC2QONxZDauVKmMOHS+CAqr6SOAEEFBD7ybASkDfnxIS8V0A8PJQ9UgLy5nC46xO//jXE558Hsn5z7FiIpUuRoh/EUCNUVYUEkReHDvmewO3VC3Z5OXSmQGmYc6VnRbhHjx6yn2p0nA3yoC0oRGWbvDbIW/ZuWdXIrl2LGBN3AfCAWAzJFSsQHzcO4uiRI26UxMvKfAsHug7zV7+CvXgxojmGiyWgZH094i++iMicOUAOyuKHofTjjyMyYwZE/WefufGpU6H/+c9+6MlnnYEDkdy9W6ZqVVpWAQIUcdodtbNoX1TuhWGOanT3Vb+ipf3nP8hu3470iBEQJSXy6FfqyTSxAlQkBg+GHkDs6PzsZ3A3boRwDx1y3fPPh/jqK/8Cqq6G9tOftgmg8mOkrXXrEJs9G9qXX8KuqJDq217p2dq/H0b//r55QTyOut27Ier27nVLzj3XN0Hn4ouhvfOOpNMWwqwzAuIuspctQ2jmTB5nkF/k7t2yLnai2rx1110wnn/eN0/JrVshUqtXu7GJE/0Ri0TQuHo1wqNGSTo0sNz2bFQnpW78m9gbqh37qSLKx1EqwjFUK+PoUYglS6AtWiTpuOPHI11ZmVcrRtxKDSViJIcV4lhz507ER4yQQvXTzHnzIKyRI1399df90AGTT03btyNz6qmSTlsAqkJOsfTOnQjdfz/0XbvkYwQu6AsWoIlBZY7pE9XmS5JJiFGjoL37ri++7Ouug7AHDXK16mp/hMaMgblsGZpyqLJCBSSSScQ2b4axdi1QUiJjQen46TqcyZOBRYugde8uHcaOCKiHrsOeMcO3mjn9+0M4/fq5fss3ZlkZrEceAXO9Sq2U6qiQQn0D7FeVE/7P3rMHiVGjWvtgQoB03Vmz5A5l81ZaFB2pfrkqTH4Ox4FWUYHwrFm+vni3X79gBEQQgTtgAERpKbQ1awoy0jqdsTagMAxwsWcPsoZxwlBD2SwFA3aefBLOa69By2ah1dT4ExCzCEHsIIm2YKr2ggvg7NghU5YKCKkw12ql9J7VLmM4ER0xAtobb7RmJJGAXVd3HC2vf8WTUnnbkk4OeSbGjm3GPxL256l9dUZSLqGEQQjI/OUvJQTFLC2FedVV0olTC6ZaKLA5F8lAVIUadAjDS5YgMm9eM0Oelr3/fqTmzJEnoELMUijKBtHh9KJcVUAb+uc/ET98GHZVFYzKys7IJf+M27NnQAK6+24JsEznMnGFGGknk0H3sjLo69blF+Zccw3qXnlF7sq
OxmLHwYCjUThPPAHtscf8CYg2yB4yxNW2b/dFyL7qKpgrV8LKIb645b15H6VunIQ7QoUa9JVU3ifU0AC7pgbagAGwiYnOrchLi+OVWnH3ePNEqhLBxyKNjdAYPhWIpG0pBPessyDS8+e7kVwGrdNSikSQfucdRM87T5IIypMmrW/KB7U00vy7rqYGJddfXzDMryX/zGWJzI4dbviKKzotG/Vg5rnnEJky5VshoMYNG5qrMj5bE1FyyX373MRPfuI7D02fIbtvn1yS1y+hOnmB6MqXUeBtBThXqsPn2/NxvLS8PpG3n88agwZB+PSiuY5UdTXEsX373BK61Cyt+GyZJUuQuuMO36FGZxJmykjHdu1C9LrrfHICuD/4AdyaGojagwfdBOvTS5f6J5pIyLKJceGFrS6zKOJdlVGkgLSvvkLJxRdD5O6w+WHIHj0a+sqVEOlUytU3boQxaZLv6Jc1Leuaa5BhAaBvX7m+YqmYQ5+KWILVq307iFy3+eKLsG6+ubmq4TKPc8cdEO+/70fozc8y4p42DU0PPgg3HM7fOFSEg65qNE+pI/7MMzDo9+SucPlhxGVWYs0a1F90EYRpmq5DZOucOQgtXuyH7tfPUki/+Q3Sc+dC79Yt71VzgLqqqfJEyoCrfo6hc6j66fsoH4e+lfKe6VtJw57JwFi6FLEFCwLZOZzfmjoV7hNPyKtXQsFfQv/6F2L33tuMIguiaRqsESPkVs3k7riSrLrUxlOIQlGnl/eym7faQcdShS30ibyhBukZEyZA27w5MMwk8QJNCxfCHDlSlquOhwFPmIAQJwuwEVLS9PLLyF56qVS5QIy0acLg5bwhQwJHvpnDhiH5xz9KCcjSs/dCHaqrEbvhBnYGKCJ55xHmz3+O7C23QKeHmwNpeS+vGakUuIsZtGYGD4aTK1Uf5/uYJuwtWxDesAGhTZuCx2fHYsi++iokqEr5Y8ehXOvrEZk/H6Hy8mAFlKPG6NghZnnkSGTvuguRM87I56vrq6vRfdw4idoQf/87cM458impVp9+isi6dQixZn7gALd9l6zPnjABdmUlwrnUSbvXwkuuvhp6AJ7oCbnQdemMOQx0eRc+k0GEhpZGcu5cWIYB4803ob/1FsSnnwZmgNtbE8Hmx95/vzWAquWdVbml9+6F8YtfBOJdd8lXHTBRAsvttWthsVjY4q5u23c1HAfmqlUI33OP7xgtYF66hJy7bJm8ie1qWsevhTd98YW8dhCuqAgUf9MlHHaWaCSC7KRJ0ObNg8E7Z23dm89ms3mMoirGUc1koqqhARp9o1df7RLIW2f5Cuo5d/hwOC+8gCxVLJe/pjOqoDd0Rr/xroZ8kMfza699f4REICmzhR98IGV9wqsI3h1EidHDVWXhPIiJ8LxZs6CvWUPoZ1Bf4MmhEwrBufVWmMuX58MZag43Qiu+M5mvQw2u1ou09yLOZRhA8MCKFdAefRTiyy9PDnN+Z00k4Dz8MMTUqajz3FhStwzbRNp/02UWrsn7chN71SqI8nJoe/b4XW5Rn3fPPhsuAVGTJ8ud0uELdel0Om+k2yvttkyH6vRmKypg8GJdwJDhwKUWjSJ7++3ysMl6cEPKfHC+9tK30tx09EKdWrh6Q5Nr28ju2oUQ3+Tis8QbuFBUaHPmmbCefVZe7HU07eS8gSr5+eeILl+O8MaN0IJIuAUgLefHP0Z29Gg4ZWWInnaav1d0NTQ05FVMFeNorJioUgAl9c4yGSd5inde4JLz0Uewt21D9KWXoL31VgBsFk6CN5vTU6YgOmwY7AED5PpVcbIlkMsL2FK+T0u+ZZGzUCPdkTdQhfbtQ+yRR6SgBN+CwDSo31dStCUvlrpjMdjnnYfUU0/BuuACOeo78wYq88gRaFu2wHrjDeiffCIzBBLs3Vlh0cHr0wd8aYAYOBC46CJg1CjYvXt33Su6Wl6HUg4Tt5eqryvICr8dL/yWYxUKnmOVKraCv0SjvBQiX9HVwBjvwAHoH34Io6pKXnGS15yYpPNAgTmXy1fp9O8vo2ybAunXD/FevUCwelM4DCsnaC/8RXnG/M1+8kFToFK8aiN6YcdUN4Wp9PJHbfkf65FaJpqTIWoAAAAASUVORK5CYII=", "small_icon": 
"iVBORw0KGgoAAAANSUhEUgAAACQAAAAkCAYAAADhAJiYAAAAAXNSR0IArs4c6QAABiNJREFUWEetWFtIVF0U/s6Zm859JCV8SoKgjMS0C0SUFQVJYQSlFUlJ9eBDZBFB+Wag9FJSkRQGQuRb9hDdKI2E/JUopKwISoiIUObijHN1zv75tjq/l3Eu+u+nxTlrr7X2Wmt/a62tCCGEpmnw+XxwuVyIRqMYHx+XNJfH44HVaoXBYEjQWm8vTHv2yP8Lru/f4bHb5W/KCoVCCIfDkh4bG0M8Hpe0EAJerxdOpxMTExNQAoGAUBQFer0esVgMqqpCp9NJmouGkJEbSVOQ6O2FeffulPaEhoagW7lS8lAWZVI2aeqiTtLTuqUxigLF6/UKMprNZgQCAanUZDJJmove4eloCGmekputT59CV1OT1KjIq1cIl5VJfi7KokzKJk1d1EmaRlgsFhkVGq0sJmQUHOroQG5tbXKDnjyBae9eGeKsQ+Z2u6WHHA6HFGA0GqXFM4X5/X7pFcacp6KrbY8eQV9Xl9Sg8IsXCJWXz8rD3Nxc5OTkSLk2m02GjTQ9xPxhHslQRiIROkkq5AfSDA+9MDP+IhqF6fRpqF1dQCSSOqGn/xoM0KqqELl3j8kIXp65OTlXd9qQBd6+haW2FsrPn5kZsQCXKCzEeGcnrFu3pr5lC4ZsZASOykqoAwNLMmTuZm3dOoh//oFuKnzzQhaPx2XImCd2u13mB2+Vc/nyzEOTrcmqCt/fv7Dl5cmdxCXmFVNlPg719yN31y5A07JVkzV/uLsb2saNMncTODQzZL7hYTiKirIWvJQN/k+fYCsu/u+WJXDI7YaroAAQYinys9+rKPD++QNHQcGklxJIvW8fdG/eZCxQ27AB0Y4OhJ1OOM6cQVzTEGhvhzMYhHboENT+/sxllZYi0NMzidQej0cotHDNmowERI8fR/DGDQmkLCORSETSTEiCJmmelKXAfv481Pb2jOT6BgehKyqaLB1QlIw2BZubYWhoSFR+Ii8RmIjL0/GWkmaS8taQNj18CHN9fUbyY9EoFPfoqHAtW5Z2Q+TmTZjq6xOlY25LQa8QOvh9Ji2La08PrBUVaXWMu91Q4leuCLWpKSWztn49MDAgFdIj9ABp1j1WcdL0EKu4xLMtW6C9e4fAFHTQW1plJfTPnqXUE6urgxKrqBD67u6UjBPd3dBv3z6vWZsbMjYbKvPp8GHoOjtnFejYly8wpMnTeFkZFC0vTyhu98IG6XTwezzzqv3MkDmGhoDRUahVVdBqaqB1dCTCR8HMJVnt2UFONX7JFAqHA4owmwWCwYUNMpkQ+/oVhhUrknrIeOAAdM+fy/3RixcxfunSrKTmdxof/vEDOWvXAqHQwrpyczMwSFWhbduG8cePZT/DnpttBGkmb05hIcFMKmHTpq+ulv/JRx4uwoNlxw6o79+nBV5FOBwCPt/CVhuN8I+OLhiy6K9fsNy+DaWkBGNVVUlvWSJkTmfqgm02Q5nYvFno+vpSZ7/bDYPLlTapk+FQImQ+H3I4yaQoTdrq1VAiR48K44MHKQ2KnDqFYEvLrBY2GxySo1RjIwy3bqWGl8pKKL7Pn4W9uDgtaPm+fYNj1apFAaN3eBjODLqI8IcP2ZUO7+/fsOTnZ1U6ZMjy84F4PO2hJ0uH2y2M3d2wHDyYdgNrXuD1a8RKSuZNo8lKh21kBHqCYQbGjLe1QRw5MjWXxeNQOdSFw+mNAiBsNoz19UlsYrmInjsHPWW0tsrEN4yOwlJeDmVsLCN5MBohQiFMsIWdbtACg4Owl5ZmJmCaS1EgXC4oU2AnCGwcDrNs8gIvX8Kyc+dkgzazhY00NcHU2JidUUvkJrobW1qStLBTrx/ayZNQ799foprMtov9+4GurtmvH36/X75+cKIk3HOszmlogNrWlpnURXJp1dUI3r0rd7ONmX4JmRWymbP9WH8/7Js2LVJd6m3i40dZapLO9qleP1gkY5cvw9Tc/L8YFj17FoZr1+Cfet5J+mCVyetHgFf5wgWYOjqAiYmsjYucOAG0tsJktaZ//Zgeg/gEw/aTucS2gTQX289gMCinCtIcs03Xr0O9c4djLxT2UtNTLmHAYoGwWiGOHYNy9aqUQVlsdZkrpIldbHlJM3/5sMWJhd/+BXe3XAACKxnuAAAAAElFTkSuQmCC", "visible": true, "tab_version": "4.1.4", "tab_build_no": "0", "build_no": 18}, "data_input_builder": {"datainputs": [{"index": "default", "sourcetype": "sigsci-requests", "interval": "300", "use_external_validation": true, "streaming_mode_xml": true, "name": "SigsciRequests", "title": "SigsciRequests", "description": "", "type": "customized", "parameters": [{"name": "site_api_name", "label": "Site API Name", "help_string": "This is the API Name of the site to pull request data from. This should not be a URL.", "required": true, "format_type": "text", "default_value": "", "placeholder": "", "type": "text", "value": "jeremycx"}, {"name": "request_limit", "label": "Request Limit", "help_string": "The amount of request objects returned in the array. Default: 100. Max:1000", "required": true, "format_type": "text", "default_value": "1000", "placeholder": "", "type": "text", "value": "1000"}, {"name": "disable_catchup", "label": "Disable Catchup", "help_string": "Disables catch-up behavior. Request feed will always be ingested from now and the delta (and offset). We recommend keeping this as checked for request feeds with large amounts of requests. 
", "required": false, "format_type": "checkbox", "default_value": true, "type": "checkbox", "value": false}, {"name": "twenty_hour_catchup", "label": "24 Hour Catchup", "help_string": "In the event the last time stored is >24hours the TA will try can try and catch-up from exactly 24 hours ago, otherwise resets to now - delta. Disable catchup must be false in order to work.", "required": false, "format_type": "checkbox", "default_value": false, "type": "checkbox", "value": true}, {"name": "attack_and_anomaly_signals_only", "label": "Attack & Anomaly Signals Only", "help_string": "Only retrieves requests that contain attack or anomaly signals. Please evaluate your signal configuration if there are overly inclusive signals creating excessive requests.", "required": false, "format_type": "checkbox", "default_value": false, "type": "checkbox", "value": false}, {"name": "request_timeout", "label": "Request Timeout", "help_string": "Configures Request Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required": true, "format_type": "text", "default_value": "60", "placeholder": "Request Timeout", "type": "text", "value": "60"}, {"name": "read_timeout", "label": "Read Timeout", "help_string": "Configures Read Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required": true, "format_type": "text", "default_value": "60", "placeholder": "", "type": "text", "value": "60"}], "data_inputs_options": [{"type": "customized_var", "name": "site_api_name", "title": "Site API Name", "description": "This is the API Name of the site to pull request data from. This should not be a URL.", "required_on_edit": false, "required_on_create": true, "format_type": "text", "default_value": "", "placeholder": ""}, {"type": "customized_var", "name": "request_limit", "title": "Request Limit", "description": "The amount of request objects returned in the array. Default: 100. Max:1000", "required_on_edit": false, "required_on_create": true, "format_type": "text", "default_value": "1000", "placeholder": ""}, {"type": "customized_var", "name": "disable_catchup", "title": "Disable Catchup", "description": "Disables catch-up behavior. Request feed will always be ingested from now and the delta (and offset). We recommend keeping this as checked for request feeds with large amounts of requests. ", "required_on_edit": false, "required_on_create": false, "format_type": "checkbox", "default_value": true}, {"type": "customized_var", "name": "twenty_hour_catchup", "title": "24 Hour Catchup", "description": "In the event the last time stored is >24hours the TA will try can try and catch-up from exactly 24 hours ago, otherwise resets to now - delta. Disable catchup must be false in order to work.", "required_on_edit": false, "required_on_create": false, "format_type": "checkbox", "default_value": false}, {"type": "customized_var", "name": "attack_and_anomaly_signals_only", "title": "Attack & Anomaly Signals Only", "description": "Only retrieves requests that contain attack or anomaly signals. Please evaluate your signal configuration if there are overly inclusive signals creating excessive requests.", "required_on_edit": false, "required_on_create": false, "format_type": "checkbox", "default_value": false}, {"type": "customized_var", "name": "request_timeout", "title": "Request Timeout", "description": "Configures Request Timeout for HTTP operations. 
Consider increasing if on a slow connection or pagination batches are large.", "required_on_edit": false, "required_on_create": true, "format_type": "text", "default_value": "60", "placeholder": "Request Timeout"}, {"type": "customized_var", "name": "read_timeout", "title": "Read Timeout", "description": "Configures Read Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required_on_edit": false, "required_on_create": true, "format_type": "text", "default_value": "60", "placeholder": ""}], "customized_options": [{"name": "site_api_name", "value": "jeremycx"}, {"name": "request_limit", "value": "1000"}, {"name": "disable_catchup", "value": true}, {"name": "twenty_hour_catchup", "value": false}, {"name": "attack_and_anomaly_signals_only", "value": false}, {"name": "request_timeout", "value": "60"}, {"name": "read_timeout", "value": "60"}], "code": "# encoding = utf-8\nfrom timeit import default_timer as timer\nimport time\nfrom datetime import datetime, timezone, timedelta\nfrom sigsci_helper import get_from_and_until_times, Config, get_results, get_until_time\n\n\"\"\"\n IMPORTANT\n Edit only the validate_input and collect_events functions.\n Do not edit any other part in this file.\n This file is generated only once when creating the modular input.\n\"\"\"\n\n\n# def use_single_instance_mode():\n# return True\n\ndef validate_input(helper,definition):\n request_limit = int(definition.parameters.get(\"request_limit\", None))\n if request_limit is None or request_limit == \"\":\n raise ValueError('The request limit cannot be blank')\n if request_limit <= 0:\n raise ValueError('The request limit cannot be 0')\n if request_limit > 1000:\n raise ValueError('Request Limit cannot be greater than 1000')\n\n # Read Timeout passed to send_http_request. Type: float.\n # https://docs.splunk.com/Documentation/AddonBuilder/4.1.4/UserGuide/PythonHelperFunctions\n # We do this per input module as splunk provides no way to validate global configuration arguments :')\n request_timeout = definition.parameters.get(\"request_timeout\", None)\n if request_timeout is None:\n raise ValueError(\"Request timeout configuration is missing\")\n try:\n request_timeout = float(request_timeout)\n except ValueError:\n raise ValueError(f\"Invalid request timeout value: {request_timeout}\")\n if request_timeout > 300.0 or request_timeout <= 0:\n raise ValueError(f\"Request timeout must be between 0 and 300 seconds, got {request_timeout}\")\n\n # Read Timeout passed to send_http_request. 
Type: float.\n read_timeout = definition.parameters.get(\"read_timeout\", None)\n if read_timeout is None:\n raise ValueError(\"Read timeout configuration is missing\")\n try:\n read_timeout = float(read_timeout)\n except ValueError:\n raise ValueError(f\"Invalid read timeout value: {read_timeout}\")\n if read_timeout > 300.0 or read_timeout <= 0:\n raise ValueError(f\"Read timeout must be between 0 and 300 seconds, got {read_timeout}\")\n\n twenty_hour_catchup = definition.parameters.get('twenty_hour_catchup', None)\n disable_catchup = definition.parameters.get('disable_catchup', None)\n if twenty_hour_catchup and disable_catchup is True:\n raise ValueError(f\"Catch up values are mutually exclusive\")\n\n site_name = definition.parameters.get(\"site_api_name\", None)\n if site_name is None or site_name == \"\":\n msg = \"The site_name can not be empty\"\n raise ValueError(\"InvalidSiteName\", msg)\n elif \"http\" in site_name:\n msg = (\n \"The site name is not the full URL it should be the \",\n \"API Name of the site like 'my_example_site'\",\n )\n raise ValueError(\"InvalidSiteName\", msg)\n elif \" \" in site_name:\n msg = (\n \"The site name should be the API Name of the site like \",\n \"not the Display Name. Example would be 'my_site_name' instead of \",\n \"My Site Name\",\n )\n raise ValueError(\"InvalidSiteName\", msg)\n pass\n\n\ndef collect_events(helper, ew):\n start = timer()\n loglevel = helper.get_log_level()\n helper.set_log_level(loglevel)\n # Proxy setting configuration\n # proxy_settings = helper.get_proxy()\n api_host = \"https://dashboard.signalsciences.net\"\n global_email = helper.get_global_setting(\"email\")\n global_api_token = helper.get_global_setting(\"api_token\")\n global_corp_api_name = helper.get_global_setting(\"corp_api_name\")\n helper.log_info(\"email: %s\" % global_email)\n helper.log_info(\"corp: %s\" % global_corp_api_name)\n \n # Request / Read Timeouts\n request_timeout = float(helper.get_arg(\"request_timeout\"))\n read_timeout = float(helper.get_arg('read_timeout'))\n helper.log_info(f\"request configuration is: request:{request_timeout}, read: {read_timeout}\")\n\n # Config declaration.\n twenty_hour_catchup = helper.get_arg('twenty_hour_catchup')\n helper.log_info(f\"twenty four hour catchup is: {twenty_hour_catchup}\")\n\n disable_catchup = helper.get_arg('disable_catchup')\n helper.log_info(f\"disable catchup is: {disable_catchup}\")\n\n attack_and_anomaly_signals_only = helper.get_arg('attack_and_anomaly_signals_only')\n helper.log_info(f\"attack signals only is: {attack_and_anomaly_signals_only}\")\n\n def pull_requests(helper, current_site, delta, key=None):\n site_name = current_site\n last_name = f\"requests_last_until_time_{current_site}\"\n last_run_until = helper.get_check_point(last_name)\n request_limit = helper.get_arg('request_limit')\n helper.log_info(f\"request limit: {request_limit}\")\n\n if last_run_until is None:\n helper.log_info(\"no last_run_time found in checkpoint state\")\n helper.log_debug(\"get_from_until\")\n until_time, from_time = get_from_and_until_times(\n helper, delta, five_min_offset=True\n )\n else:\n helper.log_info(f\"last_run_until found in state: {last_run_until}\")\n helper.log_debug(\"get_until\")\n until_time, from_time = get_until_time(\n helper, last_run_until, delta, twenty_hour_catchup, disable_catchup, five_min_offset=True\n )\n\n if from_time is None:\n helper.log_info(f\"{last_run_until} >= current now time, skipping run\")\n return\n\n if from_time >= until_time:\n 
helper.save_check_point(last_name, from_time)\n helper.log_info(\n f\"from_time {from_time} >= until_time {until_time}, skipping run\"\n )\n return\n\n helper.log_info(\"SiteName: %s\" % site_name)\n helper.log_info(f\"Start Period: {time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(from_time))}\")\n helper.log_info(f\"End Period: {time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(until_time))}\")\n\n input_name = helper.get_input_stanza_names()\n single_name = \"\"\n\n if type(input_name) is dict and input_name > 1:\n helper.log_info(\"Multi instance mode\")\n for current_name in input_name:\n single_name = current_name\n else:\n helper.log_info(\"Single instance mode\")\n helper.log_info(\"Inputs: %s\" % input_name)\n helper.log_info(\"Inputs Num: %s\" % len(input_name))\n single_name = input_name\n helper.log_info(f\"single_name: {single_name}\")\n\n # Loop across all the data and output it in one big JSON object\n url = (\n f\"{api_host}/api/v0/corps/{global_corp_api_name}\"\n f\"/sites/{site_name}/feed/requests\"\n f\"?limit={request_limit}\"\n f\"&from={from_time}&until={until_time}\"\n )\n if attack_and_anomaly_signals_only:\n attack_signals = [\n \"USERAGENT\",\n \"AWS-SSRF\",\n \"BACKDOOR\",\n \"CMDEXE\",\n \"SQLI\",\n \"TRAVERSAL\",\n \"XSS\",\n \"XXE\"\n ]\n anomaly_signals = [\n \"2FA-DISABLED\", \"2FA-CHANGED\", \"ABNORMALPATH\", \"ADDRESS-CHANGED\", \"ALLOWED\",\n \"BHH\", \"BLOCKED\", \"BODY-PARSER-EVASION\", \"CODEINJECTION\", \"COMPRESSED\",\n \"CC-VAL-ATTEMPT\", \"CC-VAL-FAILURE\", \"CC-VAL-SUCCESS\", \"CVE-2017-5638\",\n \"CVE-2017-7269\", \"CVE-2017-9805\", \"CVE-2018-11776\", \"CVE-2018-15961\",\n \"CVE-2018-9206\", \"CVE-2019-0192\", \"CVE-2019-0193\", \"CVE-2019-0232\",\n \"CVE-2019-11580\", \"CVE-2019-14234\", \"CVE-2019-16759\", \"CVE-2019-2725\",\n \"CVE-2019-3396\", \"CVE-2019-3398\", \"CVE-2019-5418\", \"CVE-2019-6340\",\n \"CVE-2019-8394\", \"CVE-2019-8451\", \"CVE-2021-26084\", \"CVE-2021-26855\",\n \"CVE-2021-40438\", \"CVE-2021-44228\", \"CVE-2021-44228-STRICT\",\n \"CVE-2022-22963\", \"CVE-2022-22965\", \"CVE-2022-26134\", \"CVE-2022-42889\",\n \"CVE-2023-34362\", \"CVE-2023-38218\", \"DATACENTER\", \"DOUBLEENCODING\",\n \"EMAIL-CHANGED\", \"EMAIL-VALIDATION\", \"FORCEFULBROWSING\", \"GC-VAL-ATTEMPT\",\n \"GC-VAL-FAILURE\", \"GC-VAL-SUCCESS\", \"GRAPHQL-API\", \"GRAPHQL-DUPLICATE-VARIABLES\",\n \"GRAPHQL-IDE\", \"GRAPHQL-INTROSPECTION\", \"GRAPHQL-DEPTH\",\n \"GRAPHQL-MISSING-REQUIRED-OPERATION-NAME\",\n \"GRAPHQL-UNDEFINED-VARIABLES\", \"HTTP403\", \"HTTP404\", \"HTTP429\",\n \"HTTP4XX\", \"HTTP500\", \"HTTP503\", \"HTTP5XX\", \"IMPOSTOR\", \"INFO-VIEWED\",\n \"INSECURE-AUTH\", \"NOTUTF8\", \"INVITE-FAILURE\", \"INVITE-ATTEMPT\",\n \"INVITE-SUCCESS\", \"JSON-ERROR\", \"KBA-CHANGED\", \"LOGINATTEMPT\",\n \"LOGINDISCOVERY\", \"LOGINFAILURE\", \"LOGINSUCCESS\", \"MALFORMED-DATA\",\n \"SANS\", \"MESSAGE-SENT\", \"NO-CONTENT-TYPE\", \"NOUA\", \"NULLBYTE\",\n \"OOB-DOMAIN\", \"PW-CHANGED\", \"PW-RESET-ATTEMPT\", \"PW-RESET-FAILURE\",\n \"PW-RESET-SUCCESS\", \"PRIVATEFILE\", \"rate-limit\", \"REGATTEMPT\", \"REGFAILURE\",\n \"REGSUCCESS\", \"RSRC-ID-ENUM-ATTEMPT\", \"RSRC-ID-ENUM-FAILURE\",\n \"RSRC-ID-ENUM-SUCCESS\", \"RESPONSESPLIT\", \"SCANNER\", \"SIGSCI-IP\",\n \"TORNODE\", \"WRONG-API-CLIENT\", \"USER-ID-ENUM-ATTEMPT\",\n \"USER-ID-ENUM-FAILURE\", \"USER-ID-ENUM-SUCCESS\", \"WEAKTLS\", \"XML-ERROR\"\n ]\n attack_tags = \",\".join(attack_signals)\n anomaly_tags = \",\".join(anomaly_signals)\n url = f\"{url}&tags={attack_tags},{anomaly_tags}\"\n config = 
Config(\n url=url,\n api_host=api_host,\n from_time=from_time,\n until_time=until_time,\n global_email=global_email,\n global_corp_api_name=global_corp_api_name,\n current_site=current_site,\n request_timeout=request_timeout,\n read_timeout=read_timeout,\n )\n config.headers = {\n \"Content-type\": \"application/json\",\n \"x-api-user\": global_email,\n \"x-api-token\": global_api_token,\n \"User-Agent\": config.user_agent_string,\n }\n\n all_requests = get_results(\"Requests\", helper, config)\n\n total_requests = len(all_requests)\n helper.log_info(\"Total Requests Pulled: %s\" % total_requests)\n if total_requests == 0:\n helper.save_check_point(last_name, until_time)\n helper.log_info(\n f\"No events to write, saving checkpoint to value:{until_time}\"\n )\n write_start = timer()\n event_count = 0\n for current_event in all_requests:\n if key is None:\n source_type = helper.get_sourcetype()\n event = helper.new_event(\n source=single_name,\n index=helper.get_output_index(),\n sourcetype=source_type,\n data=current_event,\n )\n else:\n indexes = helper.get_output_index()\n current_index = indexes[key]\n types = helper.get_sourcetype()\n source_type = types[key]\n single_name = single_name[0]\n event = helper.new_event(\n source=single_name,\n index=current_index,\n sourcetype=source_type,\n data=current_event,\n )\n\n try:\n ew.write_event(event)\n event_count += 1 # increment the count for successful events to not spam debug.\n except Exception as e:\n helper.log_error(f\"error writing event: {e}\")\n helper.log_error(event)\n raise e\n if event_count != 0: # We save the checkpoint earlier on 0 events.\n helper.log_info(f\"{event_count} events written, saving checkpoint: {until_time}\")\n helper.save_check_point(last_name, until_time)\n write_end = timer()\n write_time = write_end - write_start\n write_time_result = round(write_time, 2)\n helper.log_info(\"Total Event Output Time: %s seconds\" % write_time_result)\n\n # If multiple inputs configured it creates an array of values and the\n # script only gets called once per Input configuration\n all_sites = helper.get_arg(\"site_api_name\")\n time_deltas = helper.get_arg(\"interval\")\n helper.log_info(f\"interval: {time_deltas}\")\n\n if type(all_sites) is dict:\n helper.log_info(\"run_type: Sequential\")\n for active_input, site in all_sites.items():\n time_delta = int(time_deltas[active_input])\n helper.log_info(\"site: %s\" % site)\n pull_requests(helper, key=active_input, current_site=site, delta=time_delta)\n helper.log_info(\"Finished Pulling Requests for %s\" % site)\n else:\n helper.log_info(\"Run Type: Concurrent\")\n site = helper.get_arg(\"site_api_name\")\n helper.log_info(\"site: %s\" % site)\n pull_requests(helper, current_site=site, delta=int(time_deltas))\n helper.log_info(\"Finished Pulling Requests for %s\" % site)\n end = timer()\n total_time = end - start\n time_result = round(total_time, 2)\n helper.log_info(\"Total Script Time: %s seconds\" % time_result)", "uuid": "aaaeb391da9043e1819408033d9db708", "sample_count": "267"}, {"index": "default", "sourcetype": "sigsci-event", "interval": "300", "use_external_validation": true, "streaming_mode_xml": true, "name": "SigsciEvent", "title": "SigsciEvent", "description": "", "type": "customized", "parameters": [{"name": "site_api_name", "label": "Site API Name", "help_string": "This is the Site API Name. 
It should not be a URL.", "required": true, "format_type": "text", "default_value": "", "placeholder": "", "type": "text", "value": "jeremycx"}, {"name": "disable_catchup", "label": "Disable Catchup", "help_string": "Time is always set based from now - delta (Interval). Recommended to be True. Default: True.", "required": false, "format_type": "checkbox", "default_value": true, "type": "checkbox", "value": true}, {"name": "twenty_hour_catchup", "label": "24 Hour Catchup", "help_string": "If last stored timestamp was over 24 hours, resets to exactly 24 hours ago instead to meet API limitations. ", "required": false, "format_type": "checkbox", "default_value": false, "type": "checkbox", "value": false}, {"name": "request_timeout", "label": "Request Timeout", "help_string": "Configures Request Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required": true, "format_type": "text", "default_value": "60", "placeholder": "60", "type": "text", "value": "60"}, {"name": "read_timeout", "label": "read_timeout", "help_string": "Configured Read Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required": true, "format_type": "text", "default_value": "60", "placeholder": "60", "type": "text", "value": "60"}], "data_inputs_options": [{"type": "customized_var", "name": "site_api_name", "title": "Site API Name", "description": "This is the Site API Name. It should not be a URL.", "required_on_edit": false, "required_on_create": true, "format_type": "text", "default_value": "", "placeholder": ""}, {"type": "customized_var", "name": "disable_catchup", "title": "Disable Catchup", "description": "Time is always set based from now - delta (Interval). Recommended to be True. Default: True.", "required_on_edit": false, "required_on_create": false, "format_type": "checkbox", "default_value": true}, {"type": "customized_var", "name": "twenty_hour_catchup", "title": "24 Hour Catchup", "description": "If last stored timestamp was over 24 hours, resets to exactly 24 hours ago instead to meet API limitations. ", "required_on_edit": false, "required_on_create": false, "format_type": "checkbox", "default_value": false}, {"type": "customized_var", "name": "request_timeout", "title": "Request Timeout", "description": "Configures Request Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required_on_edit": false, "required_on_create": true, "format_type": "text", "default_value": "60", "placeholder": "60"}, {"type": "customized_var", "name": "read_timeout", "title": "read_timeout", "description": "Configured Read Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required_on_edit": false, "required_on_create": true, "format_type": "text", "default_value": "60", "placeholder": "60"}], "code": "# encoding = utf-8\nfrom timeit import default_timer as timer\nimport requests\nimport json\nimport time\nfrom datetime import datetime\nfrom sigsci_helper import get_from_and_until_times, Config, get_results, get_until_time\n\n\"\"\"\n IMPORTANT\n Edit only the validate_input and collect_events functions.\n Do not edit any other part in this file.\n This file is generated only once when creating the modular input.\n\"\"\"\n\n\n# def use_single_instance_mode():\n# return True\n\n\ndef validate_input(helper, definition):\n # Read Timeout passed to send_http_request. 
Type: float.\n # https://docs.splunk.com/Documentation/AddonBuilder/4.1.4/UserGuide/PythonHelperFunctions\n # We do this per input module as splunk provides no way to validate global configuration arguments :')\n request_timeout = definition.parameters.get(\"request_timeout\", None)\n if request_timeout is None:\n raise ValueError(\"Request timeout configuration is missing\")\n try:\n request_timeout = float(request_timeout)\n except ValueError:\n raise ValueError(f\"Invalid request timeout value: {request_timeout}\")\n if request_timeout > 300.0 or request_timeout <= 0:\n raise ValueError(f\"Request timeout must be between 0 and 300 seconds, got {request_timeout}\")\n\n # Read Timeout passed to send_http_request. Type: float.\n read_timeout = definition.parameters.get(\"read_timeout\", None)\n if read_timeout is None:\n raise ValueError(\"Read timeout configuration is missing\")\n try:\n read_timeout = float(read_timeout)\n except ValueError:\n raise ValueError(f\"Invalid read timeout value: {read_timeout}\")\n if read_timeout > 300.0 or read_timeout <= 0:\n raise ValueError(f\"Read timeout must be between 0 and 300 seconds, got {read_timeout}\")\n \n site_name = definition.parameters.get(\"site_api_name\", None)\n if site_name is None or site_name == \"\":\n msg = \"The site_name can not be empty\"\n raise ValueError(\"InvalidSiteName\", msg)\n elif \"http\" in site_name:\n msg = (\n \"The site name is not the full URL it should be the \",\n \"API Name of the site like 'my_example_site'\",\n )\n raise ValueError(\"InvalidSiteName\", msg)\n elif \" \" in site_name:\n msg = (\n \"The site name should be the API Name of the site like \",\n \"not the Display Name. Example would be 'my_site_name' instead of \",\n \"My Site Name\",\n )\n raise ValueError(\"InvalidSiteName\", msg)\n\n # Catchup Opts\n twenty_hour_catchup = definition.parameters.get('twenty_hour_catchup', None)\n disable_catchup = definition.parameters.get('disable_catchup', None)\n if twenty_hour_catchup and disable_catchup is True:\n raise ValueError(f\"Catch up values are mutually exclusive\")\n \n pass\n\n\ndef collect_events(helper, ew):\n start = timer()\n loglevel = helper.get_log_level()\n # Proxy setting configuration\n # proxy_settings = helper.get_proxy()\n global_email = helper.get_global_setting(\"email\")\n global_api_token = helper.get_global_setting(\"api_token\")\n global_corp_api_name = helper.get_global_setting(\"corp_api_name\")\n api_host = \"https://dashboard.signalsciences.net\"\n helper.log_info(\"email: %s\" % global_email)\n helper.log_info(\"corp: %s\" % global_corp_api_name)\n \n # Request / Read Timeouts\n request_timeout = float(helper.get_arg(\"request_timeout\"))\n read_timeout = float(helper.get_arg('read_timeout'))\n helper.log_info(f\"request configuration is: request:{request_timeout}, read: {read_timeout}\")\n \n # Config Declaration\n twenty_hour_catchup = helper.get_arg('twenty_hour_catchup')\n helper.log_info(f\"twenty four hour catchup is: {twenty_hour_catchup}\")\n \n disable_catchup = helper.get_arg('disable_catchup')\n helper.log_info(f\"disable catchup is: {disable_catchup}\")\n\n def pull_events(current_site, delta, key=None):\n site_name = current_site\n last_name = f\"events_last_until_time_{current_site}\"\n last_run_until = helper.get_check_point(last_name)\n helper.log_info(f\"last_run_until: {last_run_until}\")\n if last_run_until is None:\n (until_time, from_time) = get_from_and_until_times(\n helper, delta, five_min_offset=False\n )\n else:\n (until_time, from_time) = 
get_until_time(\n helper, last_run_until, delta, twenty_hour_catchup=twenty_hour_catchup, catchup_disabled=disable_catchup, five_min_offset=False\n )\n if from_time is None or from_time > until_time:\n helper.log_info(f\"{from_time} >= current now time, skipping run\")\n return\n if from_time >= until_time:\n helper.save_check_point(last_name, from_time)\n helper.log_info(\n f\"from_time {from_time} >= until_time {until_time}, skipping run\"\n )\n return\n helper.save_check_point(last_name, until_time)\n helper.log_info(\"SiteName: %s\" % site_name)\n\n helper.log_info(f\"Start Period: {time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(from_time))}\")\n helper.log_info(f\"End Period: {time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(until_time))}\")\n\n input_name = helper.get_input_stanza_names()\n single_name = \"\"\n\n if type(input_name) is dict and input_name > 1:\n helper.log_info(\"Multi instance mode\")\n for current_name in input_name:\n single_name = current_name\n else:\n helper.log_info(\"Single instance mode\")\n helper.log_info(\"Inputs: %s\" % input_name)\n helper.log_info(\"Inputs Num: %s\" % len(input_name))\n single_name = input_name\n helper.log_info(f\"single_name: {single_name}\")\n\n # Loop across all the data and output it in one big JSON object\n url = (\n f\"{api_host}/api/v0/corps/{global_corp_api_name}\"\n f\"/sites/{site_name}/activity?\"\n f\"from={from_time}&until={until_time}\"\n )\n helper.log_info(\"Pulling results from Events API\")\n config = Config(\n url=url,\n api_host=api_host,\n from_time=from_time,\n until_time=until_time,\n global_email=global_email,\n global_corp_api_name=global_corp_api_name,\n current_site=current_site,\n request_timeout=request_timeout,\n read_timeout=read_timeout,\n )\n config.headers = {\n \"Content-type\": \"application/json\",\n \"x-api-user\": global_email,\n \"x-api-token\": global_api_token,\n \"User-Agent\": config.user_agent_string,\n }\n all_events = get_results(\"Events\", helper, config)\n total_requests = len(all_events)\n helper.log_info(\"Total Events Pulled: %s\" % total_requests)\n write_start = timer()\n for current_event in all_events:\n helper.log_debug(current_event)\n if key is None:\n source_type = helper.get_sourcetype()\n helper.log_info(\"Concurrent Mode\")\n source_type_info = type(source_type)\n active_index = helper.get_output_index()\n index_info = type(active_index)\n single_name_info = type(single_name)\n current_event_info = type(current_event)\n helper.log_info(f\"source_type: {source_type}\")\n helper.log_info(f\"source_type_info: {source_type_info}\")\n helper.log_info(f\"index: {active_index}\")\n helper.log_info(f\"index_info: {index_info}\")\n helper.log_info(f\"single_name: {single_name}\")\n helper.log_info(f\"single_name_info: {single_name_info}\")\n helper.log_info(f\"current_event: {current_event}\")\n helper.log_info(f\"current_event_info: {current_event_info}\")\n event = helper.new_event(\n source=single_name,\n index=helper.get_output_index(),\n sourcetype=source_type,\n data=current_event,\n )\n else:\n indexes = helper.get_output_index()\n current_index = indexes[key]\n types = helper.get_sourcetype()\n source_type = types[key]\n single_name = single_name[0]\n helper.log_info(\"Sequential Mode\")\n helper.log_info(f\"source_type: {source_type}\")\n helper.log_info(f\"index: {current_index}\")\n helper.log_info(f\"single_name: {single_name}\")\n helper.log_info(f\"current_event: {current_event}\")\n event = helper.new_event(\n source=single_name,\n index=current_index,\n 
sourcetype=source_type,\n data=current_event,\n )\n\n try:\n ew.write_event(event)\n except Exception as e:\n raise e\n write_end = timer()\n write_time = write_end - write_start\n write_time_result = round(write_time, 2)\n helper.log_info(\"Total Event Output Time: %s seconds\" % write_time_result)\n\n # If multiple inputs configured it creates an array of values and the\n # script only gets called once per Input configuration\n\n all_sites = helper.get_arg(\"site_api_name\")\n time_deltas = helper.get_arg(\"interval\")\n helper.log_info(f\"interval: {time_deltas}\")\n if type(all_sites) is dict:\n helper.log_info(\"run_type: Sequential\")\n for active_input in all_sites:\n site = all_sites[active_input]\n current_delta = int(time_deltas[active_input])\n helper.log_info(\"site: %s\" % site)\n pull_events(key=active_input, current_site=site, delta=current_delta)\n helper.log_info(\"Finished Pulling Events for %s\" % site)\n else:\n helper.log_info(\"Run Type: Concurrent\")\n site = helper.get_arg(\"site_api_name\")\n helper.log_info(\"site: %s\" % site)\n pull_events(current_site=site, delta=int(time_deltas))\n helper.log_info(\"Finished Pulling Events for %s\" % site)\n end = timer()\n total_time = end - start\n time_result = round(total_time, 2)\n helper.log_info(\"Total Script Time: %s seconds\" % time_result)", "customized_options": [{"name": "site_api_name", "value": "jeremycx"}, {"name": "disable_catchup", "value": false}, {"name": "twenty_hour_catchup", "value": false}, {"name": "request_timeout", "value": "60"}, {"name": "read_timeout", "value": "60"}], "uuid": "294ad5bbdf92407b9a6785b46106152a", "sample_count": 0}, {"index": "default", "sourcetype": "sigsci-activity", "interval": "300", "use_external_validation": true, "streaming_mode_xml": true, "name": "SigsciActivity", "title": "SigsciActivity", "description": "", "type": "customized", "parameters": [{"name": "disable_catchup", "label": "Disable Catchup", "help_string": "", "required": false, "format_type": "checkbox", "default_value": true, "type": "checkbox", "value": true}, {"name": "twenty_hour_catchup", "label": "24 Hour Catchup", "help_string": "", "required": false, "format_type": "checkbox", "default_value": false, "type": "checkbox", "value": false}, {"name": "request_timeout", "label": "Request Timeout", "help_string": "Configures Request Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required": true, "format_type": "text", "default_value": "60", "placeholder": "60", "type": "text", "value": "60"}, {"name": "read_timeout", "label": "Read Timeout", "help_string": "Configures Read Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required": true, "format_type": "text", "default_value": "60", "placeholder": "60", "type": "text", "value": "60"}], "data_inputs_options": [{"type": "customized_var", "name": "disable_catchup", "title": "Disable Catchup", "description": "", "required_on_edit": false, "required_on_create": false, "format_type": "checkbox", "default_value": true}, {"type": "customized_var", "name": "twenty_hour_catchup", "title": "24 Hour Catchup", "description": "", "required_on_edit": false, "required_on_create": false, "format_type": "checkbox", "default_value": false}, {"type": "customized_var", "name": "request_timeout", "title": "Request Timeout", "description": "Configures Request Timeout for HTTP operations. 
Consider increasing if on a slow connection or pagination batches are large.", "required_on_edit": false, "required_on_create": true, "format_type": "text", "default_value": "60", "placeholder": "60"}, {"type": "customized_var", "name": "read_timeout", "title": "Read Timeout", "description": "Configures Read Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required_on_edit": false, "required_on_create": true, "format_type": "text", "default_value": "60", "placeholder": "60"}], "code": "# encoding = utf-8\nfrom timeit import default_timer as timer\nimport json\nimport time\nfrom datetime import datetime\nfrom sigsci_helper import get_from_and_until_times, Config, get_results, get_until_time\n\n\"\"\"\n IMPORTANT\n Edit only the validate_input and collect_events functions.\n Do not edit any other part in this file.\n This file is generated only once when creating the modular input.\n\"\"\"\n\n\n# def use_single_instance_mode():\n# return True\n\n\ndef validate_input(helper, definition):\n # Read Timeout passed to send_http_request. Type: float.\n # https://docs.splunk.com/Documentation/AddonBuilder/4.1.4/UserGuide/PythonHelperFunctions\n # We do this per input module as splunk provides no way to validate global configuration arguments :')\n request_timeout = definition.parameters.get(\"request_timeout\", None)\n if request_timeout is None:\n raise ValueError(\"Request timeout configuration is missing\")\n try:\n request_timeout = float(request_timeout)\n except ValueError:\n raise ValueError(f\"Invalid request timeout value: {request_timeout}\")\n if request_timeout > 300.0 or request_timeout <= 0:\n raise ValueError(f\"Request timeout must be between 0 and 300 seconds, got {request_timeout}\")\n\n # Read Timeout passed to send_http_request. 
Type: float.\n read_timeout = definition.parameters.get(\"read_timeout\", None)\n if read_timeout is None:\n raise ValueError(\"Read timeout configuration is missing\")\n try:\n read_timeout = float(read_timeout)\n except ValueError:\n raise ValueError(f\"Invalid read timeout value: {read_timeout}\")\n if read_timeout > 300.0 or read_timeout <= 0:\n raise ValueError(f\"Read timeout must be between 0 and 300 seconds, got {read_timeout}\")\n # Catchup Opts\n twenty_hour_catchup = definition.parameters.get('twenty_hour_catchup', None)\n disable_catchup = definition.parameters.get('disable_catchup', None)\n if twenty_hour_catchup and disable_catchup is True:\n raise ValueError(f\"Catch up values are mutually exclusive\")\n pass\n\n\ndef collect_events(helper, ew):\n start = timer()\n loglevel = helper.get_log_level()\n helper.set_log_level(loglevel)\n # Proxy setting configuration\n # proxy_settings = helper.get_proxy()\n global_email = helper.get_global_setting(\"email\")\n global_api_token = helper.get_global_setting(\"api_token\")\n global_corp_api_name = helper.get_global_setting(\"corp_api_name\")\n api_host = \"https://dashboard.signalsciences.net\"\n helper.log_info(\"email: %s\" % global_email)\n helper.log_info(\"corp: %s\" % global_corp_api_name)\n \n # Request / Read Timeouts\n request_timeout = float(helper.get_arg(\"request_timeout\"))\n read_timeout = float(helper.get_arg('read_timeout'))\n helper.log_info(f\"request configuration is: request:{request_timeout}, read: {read_timeout}\")\n \n # CatchUp Config Declaration\n twenty_hour_catchup = helper.get_arg('twenty_hour_catchup')\n helper.log_info(f\"twenty four hour catchup is: {twenty_hour_catchup}\")\n \n disable_catchup = helper.get_arg('disable_catchup')\n helper.log_info(f\"disable catchup is: {disable_catchup}\")\n\n def pull_events(delta, key=None):\n last_run_until = helper.get_check_point(\"activity_last_until_time\")\n helper.log_info(f\"last_run_until: {last_run_until}\")\n if last_run_until is None:\n (until_time, from_time) = get_from_and_until_times(\n helper, delta, five_min_offset=False\n )\n else:\n (until_time, from_time) = get_until_time(\n helper, last_run_until, delta, twenty_hour_catchup=twenty_hour_catchup, catchup_disabled=disable_catchup, five_min_offset=False\n )\n if from_time is None:\n helper.log_info(f\"{last_run_until} >= current now time, skipping run\")\n return\n if from_time >= until_time:\n helper.save_check_point(\"activity_last_until_time\", from_time)\n helper.log_info(\n f\"from_time {from_time} >= until_time {until_time}, skipping run\"\n )\n return\n helper.save_check_point(\"activity_last_until_time\", until_time)\n\n helper.log_info(f\"Start Period: {time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(from_time))}\")\n helper.log_info(f\"End Period: {time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(until_time))}\")\n\n input_name = helper.get_input_stanza_names()\n single_name = \"\"\n\n if type(input_name) is dict and input_name > 1:\n helper.log_info(\"Multi instance mode\")\n for current_name in input_name:\n single_name = current_name\n else:\n helper.log_info(\"Single instance mode\")\n helper.log_info(\"Inputs: %s\" % input_name)\n helper.log_info(\"Inputs Num: %s\" % len(input_name))\n single_name = input_name\n helper.log_info(f\"single_name: {single_name}\")\n\n # Loop across all the data and output it in one big JSON object\n url = (\n f\"{api_host}/api/v0/corps/{global_corp_api_name}\"\n f\"/activity?\"\n f\"from={from_time}&until={until_time}\"\n )\n config = Config(\n 
url=url,\n api_host=api_host,\n from_time=from_time,\n until_time=until_time,\n global_email=global_email,\n global_corp_api_name=global_corp_api_name,\n current_site=\"\",\n request_timeout=request_timeout,\n read_timeout=read_timeout,\n )\n config.headers = {\n \"Content-type\": \"application/json\",\n \"x-api-user\": global_email,\n \"x-api-token\": global_api_token,\n \"User-Agent\": config.user_agent_string,\n }\n helper.log_info(\"Pulling results from Corp Activity API\")\n all_events = get_results(\"Activity Events\", helper, config)\n total_requests = len(all_events)\n helper.log_info(\"Total Corp Activity Pulled: %s\" % total_requests)\n write_start = timer()\n for current_event in all_events:\n if key is None:\n source_type = helper.get_sourcetype()\n helper.log_info(\"Concurrent Mode\")\n source_type_info = type(source_type)\n active_index = helper.get_output_index()\n index_info = type(active_index)\n single_name_info = type(single_name)\n current_event_info = type(current_event)\n helper.log_info(f\"source_type: {source_type}\")\n helper.log_info(f\"source_type_info: {source_type_info}\")\n helper.log_info(f\"index: {active_index}\")\n helper.log_info(f\"index_info: {index_info}\")\n helper.log_info(f\"single_name: {single_name}\")\n helper.log_info(f\"single_name_info: {single_name_info}\")\n helper.log_info(f\"current_event: {current_event}\")\n helper.log_info(f\"current_event_info: {current_event_info}\")\n event = helper.new_event(\n source=single_name,\n index=helper.get_output_index(),\n sourcetype=source_type,\n data=current_event,\n )\n else:\n indexes = helper.get_output_index()\n current_index = indexes[key]\n types = helper.get_sourcetype()\n source_type = types[key]\n single_name = single_name[0]\n helper.log_info(\"Sequential Mode\")\n helper.log_info(f\"source_type: {source_type}\")\n helper.log_info(f\"index: {current_index}\")\n helper.log_info(f\"single_name: {single_name}\")\n helper.log_info(f\"current_event: {current_event}\")\n event = helper.new_event(\n source=single_name,\n index=current_index,\n sourcetype=source_type,\n data=current_event,\n )\n\n try:\n ew.write_event(event)\n except Exception as e:\n raise e\n write_end = timer()\n write_time = write_end - write_start\n write_time_result = round(write_time, 2)\n helper.log_info(f\"Total Corp Activity Output Time: {write_time_result} seconds\")\n\n # If multiple inputs configured it creates an array of values and the\n # script only gets called once per Input configuration\n time_deltas = helper.get_arg(\"interval\")\n helper.log_info(f\"interval: {time_deltas}\")\n if type(time_deltas) is dict:\n helper.log_info(\"run_type: Sequential\")\n for active_input in time_deltas:\n time_delta = time_deltas[active_input]\n time_delta = int(time_delta)\n helper.log_info(\"time_delta: %s\" % time_delta)\n pull_events(delta=time_delta, key=active_input)\n else:\n helper.log_info(\"Run Type: Concurrent\")\n helper.log_info(\"time_delta: %s\" % time_deltas)\n pull_events(delta=int(time_deltas))\n helper.log_info(\"Finished Pulling Corp Activity\")\n end = timer()\n total_time = end - start\n time_result = round(total_time, 2)\n helper.log_info(f\"Total Script Time: {time_result} seconds\")\n", "customized_options": [{"name": "disable_catchup", "value": true}, {"name": "twenty_hour_catchup", "value": false}, {"name": "request_timeout", "value": "60"}, {"name": "read_timeout", "value": "60"}], "uuid": "07b37d1943e942cf831c8ee85ffcb4a2", "sample_count": 0}]}, "field_extraction_builder": {"sigsci-event": 
{"is_parsed": true, "data_format": "json"}, "sigsci-activity": {"is_parsed": true, "data_format": "json"}, "sigsci-requests": {"is_parsed": true, "data_format": "json"}}, "global_settings_builder": {"global_settings": {"proxy_settings": {"proxy_type": "http"}, "log_settings": {"log_level": "DEBUG"}, "customized_settings": [{"required": true, "name": "email", "label": "E-mail", "default_value": "", "placeholder": "example@example.com", "help_string": "This it the e-mail username of the user who has the correct permissions for the app to pull the data.", "type": "text", "format_type": "text", "value": "jcocks+sigsci@fastly.com"}, {"required": true, "name": "corp_api_name", "label": "Corp", "default_value": "", "placeholder": "", "help_string": "This is the API name of your corp.", "type": "text", "format_type": "text", "value": "jcocks"}, {"required": true, "name": "api_token", "label": "API Token", "placeholder": "", "default_value": "", "help_string": "This is the API Token of the user who has the correct permissions. The API Token is tied to the username.", "type": "password", "format_type": "password", "value": ""}]}}, "sourcetype_builder": {"sigsci-requests": {"metadata": {"event_count": 0, "data_input_name": "SigsciRequests", "extractions_count": 0, "cims_count": 0}}, "sigsci-event": {"metadata": {"event_count": 0, "data_input_name": "SigsciEvent", "extractions_count": 0, "cims_count": 0}}, "sigsci-activity": {"metadata": {"event_count": 0, "data_input_name": "SigsciActivity", "extractions_count": 0, "cims_count": 0}}}, "validation": {"validators": ["best_practice_validation", "data_model_mapping_validation", "field_extract_validation", "app_cert_validation"], "status": "job_finished", "validation_id": "v_1709158986_53", "progress": 1.0}} \ No newline at end of file +{"basic_builder": {"appname": "sigsci_TA_for_splunk", "friendly_name": "Signal Sciences WAF TA", "version": "1.0.38", "author": "Fastly", "description": "For users of Fastly who would like to enrich their Splunk data with information from Fastly. This app with simple configuration enabled the importing of Events, Activity, and raw request information to Splunk.\n\nThis is an open source project, no support provided, public repository is available and installation documentation can be found at https://github.com/fastly/sigsci-splunk-app. 
The best way to report issues with the app is to create an Issue on the github page so that it can be tracked.", "theme": "#ea3f23", "large_icon": "iVBORw0KGgoAAAANSUhEUgAAAEgAAABICAYAAABV7bNHAAAAAXNSR0IArs4c6QAAD+5JREFUeF7tXAuMVNUZ/s69d96wCwULGoWGiFofNT6KsVEEhYqiEJAKiq0ixSpLa1EKKqAGBXV5RAlugkYXUFCK5dHEtKXl0QqrrNhoVojBRxsUUEGW3Z2dnZn7ar6zc8bLPnBn791BTU+y2dmz5/7n/P+c/z//4ztXuK7rIteSySRM04QQAvF4HOFwWP6ntrZWDZH9kUhE/m1ZFhoaGuRn9vF/bOl0Gk1NTfKzpmkoLS2FdeQIxJQpEP/+d55WZz6IK6+EePllqLWSRigUQrdu3fLk1HoNw0AikZBrsG0b9fX1+TE9e/bMf25sbEQ2m5V/e/mrq6uDKKaAtNGjoVVVdUYuXz8zfDiwefP/BdSuFIstoKNHj+ZVjNuU25Vax21HdWPzbsdUKoVMJiP7uYW7d+/eSq2i0ShisZjsdxwHcqumUkg8+CD0PXsgPvkE4tixgnaS27cv3D59YF1yCRoXLZIqxbWycZ1UOdXUetnP9XINuq6jpKSklRqyg2pIc0K+OV6pG02DKJaApD06cgTdIhGIu++G9pe/FCQg87e/hV1Whoymwendu3gCamxszO8gGjPVKE1lv9vrV0aYv73jaeT5oxq/QdVIKzRxIvT16wsT0IwZsObNa3NNaqd652hv7rb6vettyXfRjLRa2LFjx9B98uTCBTR9OuwFC0D1ZfvenWJ+BWRNmwZr4cLiC6iYNohCouFzx46FvmlTYSr20EMwZ8/OHxDfKyNdf+gQjJoaGFu3IpROQ9uwAeKzzwoSkHPJJbAvuwx2NAr78ssRGjoU4V69vrunGD3rTFUVIo8/DvHmmzyLIdJpWvOCBNNqMI0/PflwGM4558B6+GG4w4bJ47lLjvkgPekYT8GdO+Fu3gxRWQlx+LA/YXTwafeUU+BOmgQxZAgwdCiO5cKcb0+oYVmIb9qE8J/+1Lxb6uo6yFrAw2IxuIMHI33ttUjfdhuMaNR/LOY4Tn7P03tmAEq/gJ6w8lTpCavGfhXE2pkMslu2IDpzJrS9ewPm1h8559xzYS5eDGPoUGjhsAxWvd42DwvVqJ4qavDyx0C805608fHHCK9YgfDq1QWHDf5YL+DpaBSZ22+HOXEinAsvLF6ooVVWIlJeDrF/fwGrPXlD3dNOg/W738GYOTPv4XtTOCeMxZLJZF7FGNBRvehuMzxQoQaNHRsDTH3xYugVFUBj48njuDMzGwacm2+G9dRTcHv3hmXbeSqKb3aQbxUayf6OnmLawYOIP/AAQn/9KzNlnVnit+IZ68orkVqyBCWXXppfj/+E2eHDKL3sMmj//a9/P+ZbICY3kUD6wAHEcob6hAL6plDDePdddBs/vmg+TbHkR98p+dJLsAYN6nw+CG+/jfj06dDee69Y6y7qPM6PfoSmJ59EaMyY9hNmtbW1eSNNa06DTOOc3r8foenTEdq4saiLLvZk1rBhwLPPQh84UPLNYoPyiZgtbdtINzQgPncuwqtWFXu9J2U+86ab0LhwIdwePTpW1Qjv2IHE2LHf6dOqIEnrOjLl5UhNmtRaQJZl5VWM9Szr4EGUnnVWQfQLGkw/64c/lD/OqafCGj8e1uDBELW1iF9/vSTlbNuG5Omng19UaOVK6B9/DBw6BPHFF116ita/9x4iZ58tzQwbQ5DjQo1YJILwjTdC27q1IJ47NFgIOMznjBkD/YorYJ1/PtKAjJHYetTWwr3zTkAIJJ9+Gna/frKfaRP+iLffhr1jB7S//Q36li1dIijmnJL/+AfsXEqmVVUjtn49otOmsTTaIZ47Osjt3x9N8+cjO3QoSvv2hchVOul/KAH1pE+Sm7c+m80v0luxbWLJqb4e4W3bEHvoIQj6ZUE2w0Dm3nuRmj1bUpUCUqGG09CA2LhxMHbuDG5KTUP21lthLVoEN1caVtu3vXCGk1No3oqKqqp4wwAtmYQ+YwZC69fLmltQjX5R+pVXIPr0kaqWP8Wc5cshZs2CCCqXo+tofOYZWLfdJr8J1VjVIPOMc1gHVwLzBo8s8PH/bCxSqmwhUxEtqxrhTZsQv+8+acMCafE43PnzIX7/++aCJ/NBLgEIf/gDtOeeC2QONxZDauVKmMOHS+CAqr6SOAEEFBD7ybASkDfnxIS8V0A8PJQ9UgLy5nC46xO//jXE558Hsn5z7FiIpUuRoh/EUCNUVYUEkReHDvmewO3VC3Z5OXSmQGmYc6VnRbhHjx6yn2p0nA3yoC0oRGWbvDbIW/ZuWdXIrl2LGBN3AfCAWAzJFSsQHzcO4uiRI26UxMvKfAsHug7zV7+CvXgxojmGiyWgZH094i++iMicOUAOyuKHofTjjyMyYwZE/WefufGpU6H/+c9+6MlnnYEDkdy9W6ZqVVpWAQIUcdodtbNoX1TuhWGOanT3Vb+ipf3nP8hu3470iBEQJSXy6FfqyTSxAlQkBg+GHkDs6PzsZ3A3boRwDx1y3fPPh/jqK/8Cqq6G9tOftgmg8mOkrXXrEJs9G9qXX8KuqJDq217p2dq/H0b//r55QTyOut27Ier27nVLzj3XN0Hn4ouhvfOOpNMWwqwzAuIuspctQ2jmTB5nkF/k7t2yLnai2rx1110wnn/eN0/JrVshUqtXu7GJE/0Ri0TQuHo1wqNGSTo0sNz2bFQnpW78m9gbqh37qSLKx1EqwjFUK+PoUYglS6AtWiTpuOPHI11ZmVcrRtxKDSViJIcV4lhz507ER4yQQvXTzHnzIKyRI1399df90AGTT03btyNz6qmSTlsAqkJOsfTOnQjdfz/0XbvkYwQu6AsWoIlBZY7pE9XmS5JJiFGjoL37ri++7Ouug7AHDXK16mp/hMaMgblsGZpyqLJCBSSSScQ2b4axdi1QUiJjQen46TqcyZOBRYugde8uHcaOCKiHrsOeMcO3mjn9+0M4/fq5fss3ZlkZrEceAXO9Sq2U6qiQQn0D7FeVE/7P3rMHiVGjWvtgQoB03Vmz5A5l81ZaFB2pfrkqTH4Ox4FWUYHwrFm+vni3X79gBEQQgTtgAERpKbQ1awoy0jqdsTagMAxwsWcPsoZxwlBD2SwFA3aefBLOa69By2ah1dT4ExCzCEHsIIm2YKr2ggvg7NghU5YKCKkw12ql9J7VLmM4ER0xAtobb7RmJJGAXVd3HC2vf8WTUnnbkk4OeSbGjm3GPxL256l9dUZSLqGEQQjI/OUvJQTFLC2FedVV0olTC6ZaKLA5F8lAVIUadAjDS5YgMm9eM0Oelr3/fqTmzJEnoELMUijKBtHh9KJcVUAb+uc/ET98GHZVFYzKys7IJf+M27NnQAK6+24JsEznMnGFGGknk0H3sjLo69blF+Zccw3qXnlF7sq
OxmLHwYCjUThPPAHtscf8CYg2yB4yxNW2b/dFyL7qKpgrV8LKIb645b15H6VunIQ7QoUa9JVU3ifU0AC7pgbagAGwiYnOrchLi+OVWnH3ePNEqhLBxyKNjdAYPhWIpG0pBPessyDS8+e7kVwGrdNSikSQfucdRM87T5IIypMmrW/KB7U00vy7rqYGJddfXzDMryX/zGWJzI4dbviKKzotG/Vg5rnnEJky5VshoMYNG5qrMj5bE1FyyX373MRPfuI7D02fIbtvn1yS1y+hOnmB6MqXUeBtBThXqsPn2/NxvLS8PpG3n88agwZB+PSiuY5UdTXEsX373BK61Cyt+GyZJUuQuuMO36FGZxJmykjHdu1C9LrrfHICuD/4AdyaGojagwfdBOvTS5f6J5pIyLKJceGFrS6zKOJdlVGkgLSvvkLJxRdD5O6w+WHIHj0a+sqVEOlUytU3boQxaZLv6Jc1Leuaa5BhAaBvX7m+YqmYQ5+KWILVq307iFy3+eKLsG6+ubmq4TKPc8cdEO+/70fozc8y4p42DU0PPgg3HM7fOFSEg65qNE+pI/7MMzDo9+SucPlhxGVWYs0a1F90EYRpmq5DZOucOQgtXuyH7tfPUki/+Q3Sc+dC79Yt71VzgLqqqfJEyoCrfo6hc6j66fsoH4e+lfKe6VtJw57JwFi6FLEFCwLZOZzfmjoV7hNPyKtXQsFfQv/6F2L33tuMIguiaRqsESPkVs3k7riSrLrUxlOIQlGnl/eym7faQcdShS30ibyhBukZEyZA27w5MMwk8QJNCxfCHDlSlquOhwFPmIAQJwuwEVLS9PLLyF56qVS5QIy0acLg5bwhQwJHvpnDhiH5xz9KCcjSs/dCHaqrEbvhBnYGKCJ55xHmz3+O7C23QKeHmwNpeS+vGakUuIsZtGYGD4aTK1Uf5/uYJuwtWxDesAGhTZuCx2fHYsi++iokqEr5Y8ehXOvrEZk/H6Hy8mAFlKPG6NghZnnkSGTvuguRM87I56vrq6vRfdw4idoQf/87cM458impVp9+isi6dQixZn7gALd9l6zPnjABdmUlwrnUSbvXwkuuvhp6AJ7oCbnQdemMOQx0eRc+k0GEhpZGcu5cWIYB4803ob/1FsSnnwZmgNtbE8Hmx95/vzWAquWdVbml9+6F8YtfBOJdd8lXHTBRAsvttWthsVjY4q5u23c1HAfmqlUI33OP7xgtYF66hJy7bJm8ie1qWsevhTd98YW8dhCuqAgUf9MlHHaWaCSC7KRJ0ObNg8E7Z23dm89ms3mMoirGUc1koqqhARp9o1df7RLIW2f5Cuo5d/hwOC+8gCxVLJe/pjOqoDd0Rr/xroZ8kMfza699f4REICmzhR98IGV9wqsI3h1EidHDVWXhPIiJ8LxZs6CvWUPoZ1Bf4MmhEwrBufVWmMuX58MZag43Qiu+M5mvQw2u1ou09yLOZRhA8MCKFdAefRTiyy9PDnN+Z00k4Dz8MMTUqajz3FhStwzbRNp/02UWrsn7chN71SqI8nJoe/b4XW5Rn3fPPhsuAVGTJ8ud0uELdel0Om+k2yvttkyH6vRmKypg8GJdwJDhwKUWjSJ7++3ysMl6cEPKfHC+9tK30tx09EKdWrh6Q5Nr28ju2oUQ3+Tis8QbuFBUaHPmmbCefVZe7HU07eS8gSr5+eeILl+O8MaN0IJIuAUgLefHP0Z29Gg4ZWWInnaav1d0NTQ05FVMFeNorJioUgAl9c4yGSd5inde4JLz0Uewt21D9KWXoL31VgBsFk6CN5vTU6YgOmwY7AED5PpVcbIlkMsL2FK+T0u+ZZGzUCPdkTdQhfbtQ+yRR6SgBN+CwDSo31dStCUvlrpjMdjnnYfUU0/BuuACOeo78wYq88gRaFu2wHrjDeiffCIzBBLs3Vlh0cHr0wd8aYAYOBC46CJg1CjYvXt33Su6Wl6HUg4Tt5eqryvICr8dL/yWYxUKnmOVKraCv0SjvBQiX9HVwBjvwAHoH34Io6pKXnGS15yYpPNAgTmXy1fp9O8vo2ybAunXD/FevUCwelM4DCsnaC/8RXnG/M1+8kFToFK8aiN6YcdUN4Wp9PJHbfkf65FaJpqTIWoAAAAASUVORK5CYII=", "small_icon": 
"iVBORw0KGgoAAAANSUhEUgAAACQAAAAkCAYAAADhAJiYAAAAAXNSR0IArs4c6QAABiNJREFUWEetWFtIVF0U/s6Zm859JCV8SoKgjMS0C0SUFQVJYQSlFUlJ9eBDZBFB+Wag9FJSkRQGQuRb9hDdKI2E/JUopKwISoiIUObijHN1zv75tjq/l3Eu+u+nxTlrr7X2Wmt/a62tCCGEpmnw+XxwuVyIRqMYHx+XNJfH44HVaoXBYEjQWm8vTHv2yP8Lru/f4bHb5W/KCoVCCIfDkh4bG0M8Hpe0EAJerxdOpxMTExNQAoGAUBQFer0esVgMqqpCp9NJmouGkJEbSVOQ6O2FeffulPaEhoagW7lS8lAWZVI2aeqiTtLTuqUxigLF6/UKMprNZgQCAanUZDJJmove4eloCGmekputT59CV1OT1KjIq1cIl5VJfi7KokzKJk1d1EmaRlgsFhkVGq0sJmQUHOroQG5tbXKDnjyBae9eGeKsQ+Z2u6WHHA6HFGA0GqXFM4X5/X7pFcacp6KrbY8eQV9Xl9Sg8IsXCJWXz8rD3Nxc5OTkSLk2m02GjTQ9xPxhHslQRiIROkkq5AfSDA+9MDP+IhqF6fRpqF1dQCSSOqGn/xoM0KqqELl3j8kIXp65OTlXd9qQBd6+haW2FsrPn5kZsQCXKCzEeGcnrFu3pr5lC4ZsZASOykqoAwNLMmTuZm3dOoh//oFuKnzzQhaPx2XImCd2u13mB2+Vc/nyzEOTrcmqCt/fv7Dl5cmdxCXmFVNlPg719yN31y5A07JVkzV/uLsb2saNMncTODQzZL7hYTiKirIWvJQN/k+fYCsu/u+WJXDI7YaroAAQYinys9+rKPD++QNHQcGklxJIvW8fdG/eZCxQ27AB0Y4OhJ1OOM6cQVzTEGhvhzMYhHboENT+/sxllZYi0NMzidQej0cotHDNmowERI8fR/DGDQmkLCORSETSTEiCJmmelKXAfv481Pb2jOT6BgehKyqaLB1QlIw2BZubYWhoSFR+Ii8RmIjL0/GWkmaS8taQNj18CHN9fUbyY9EoFPfoqHAtW5Z2Q+TmTZjq6xOlY25LQa8QOvh9Ji2La08PrBUVaXWMu91Q4leuCLWpKSWztn49MDAgFdIj9ABp1j1WcdL0EKu4xLMtW6C9e4fAFHTQW1plJfTPnqXUE6urgxKrqBD67u6UjBPd3dBv3z6vWZsbMjYbKvPp8GHoOjtnFejYly8wpMnTeFkZFC0vTyhu98IG6XTwezzzqv3MkDmGhoDRUahVVdBqaqB1dCTCR8HMJVnt2UFONX7JFAqHA4owmwWCwYUNMpkQ+/oVhhUrknrIeOAAdM+fy/3RixcxfunSrKTmdxof/vEDOWvXAqHQwrpyczMwSFWhbduG8cePZT/DnpttBGkmb05hIcFMKmHTpq+ulv/JRx4uwoNlxw6o79+nBV5FOBwCPt/CVhuN8I+OLhiy6K9fsNy+DaWkBGNVVUlvWSJkTmfqgm02Q5nYvFno+vpSZ7/bDYPLlTapk+FQImQ+H3I4yaQoTdrq1VAiR48K44MHKQ2KnDqFYEvLrBY2GxySo1RjIwy3bqWGl8pKKL7Pn4W9uDgtaPm+fYNj1apFAaN3eBjODLqI8IcP2ZUO7+/fsOTnZ1U6ZMjy84F4PO2hJ0uH2y2M3d2wHDyYdgNrXuD1a8RKSuZNo8lKh21kBHqCYQbGjLe1QRw5MjWXxeNQOdSFw+mNAiBsNoz19UlsYrmInjsHPWW0tsrEN4yOwlJeDmVsLCN5MBohQiFMsIWdbtACg4Owl5ZmJmCaS1EgXC4oU2AnCGwcDrNs8gIvX8Kyc+dkgzazhY00NcHU2JidUUvkJrobW1qStLBTrx/ayZNQ799foprMtov9+4GurtmvH36/X75+cKIk3HOszmlogNrWlpnURXJp1dUI3r0rd7ONmX4JmRWymbP9WH8/7Js2LVJd6m3i40dZapLO9qleP1gkY5cvw9Tc/L8YFj17FoZr1+Cfet5J+mCVyetHgFf5wgWYOjqAiYmsjYucOAG0tsJktaZ//Zgeg/gEw/aTucS2gTQX289gMCinCtIcs03Xr0O9c4djLxT2UtNTLmHAYoGwWiGOHYNy9aqUQVlsdZkrpIldbHlJM3/5sMWJhd/+BXe3XAACKxnuAAAAAElFTkSuQmCC", "visible": true, "tab_version": "4.1.4", "tab_build_no": "0", "build_no": 18}, "data_input_builder": {"datainputs": [{"index": "default", "sourcetype": "sigsci-event", "interval": "300", "use_external_validation": true, "streaming_mode_xml": true, "name": "SigsciEvent", "title": "SigsciEvent", "description": "", "type": "customized", "parameters": [{"name": "site_api_name", "label": "Site API Name", "help_string": "This is the Site API Name. It should not be a URL.", "required": true, "format_type": "text", "default_value": "", "placeholder": "", "type": "text", "value": "jeremycx"}, {"name": "disable_catchup", "label": "Disable Catchup", "help_string": "Disables catch-up behavior. Events will always be ingested from now minus the delta (including an offset for the requests feed). Recommended to be left true. Default: True.", "required": false, "format_type": "checkbox", "default_value": true, "type": "checkbox", "value": false}, {"name": "twenty_hour_catchup", "label": "24 Hour Catchup", "help_string": "In the event the last time stored is >24Hours the TA will try and catch-up from exactly 24 hours ago, otherwise resets to now minus the delta. 'Disable Catchup' must be False in order to work. 
", "required": false, "format_type": "checkbox", "default_value": false, "type": "checkbox", "value": false}, {"name": "request_timeout", "label": "Request Timeout", "help_string": "Configures Request Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required": true, "format_type": "text", "default_value": "60", "placeholder": "60", "type": "text", "value": "60"}, {"name": "read_timeout", "label": "read_timeout", "help_string": "Configured Read Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required": true, "format_type": "text", "default_value": "60", "placeholder": "60", "type": "text", "value": "60"}], "data_inputs_options": [{"type": "customized_var", "name": "site_api_name", "title": "Site API Name", "description": "This is the Site API Name. It should not be a URL.", "required_on_edit": false, "required_on_create": true, "format_type": "text", "default_value": "", "placeholder": ""}, {"type": "customized_var", "name": "disable_catchup", "title": "Disable Catchup", "description": "Disables catch-up behavior. Events will always be ingested from now minus the delta (including an offset for the requests feed). Recommended to be left true. Default: True.", "required_on_edit": false, "required_on_create": false, "format_type": "checkbox", "default_value": true}, {"type": "customized_var", "name": "twenty_hour_catchup", "title": "24 Hour Catchup", "description": "In the event the last time stored is >24Hours the TA will try and catch-up from exactly 24 hours ago, otherwise resets to now minus the delta. 'Disable Catchup' must be False in order to work. ", "required_on_edit": false, "required_on_create": false, "format_type": "checkbox", "default_value": false}, {"type": "customized_var", "name": "request_timeout", "title": "Request Timeout", "description": "Configures Request Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required_on_edit": false, "required_on_create": true, "format_type": "text", "default_value": "60", "placeholder": "60"}, {"type": "customized_var", "name": "read_timeout", "title": "read_timeout", "description": "Configured Read Timeout for HTTP operations. 
Consider increasing if on a slow connection or pagination batches are large.", "required_on_edit": false, "required_on_create": true, "format_type": "text", "default_value": "60", "placeholder": "60"}], "code": "# encoding = utf-8\nfrom timeit import default_timer as timer\nimport requests\nimport json\nimport time\nfrom datetime import datetime\nfrom sigsci_helper import get_from_and_until_times, Config, get_results, get_until_time, validate_timeouts\n\n\"\"\"\n IMPORTANT\n Edit only the validate_input and collect_events functions.\n Do not edit any other part in this file.\n This file is generated only once when creating the modular input.\n\"\"\"\n\n\n# def use_single_instance_mode():\n# return True\n\n\ndef validate_input(helper, definition):\n request_timeout = definition.parameters.get(\"request_timeout\", None)\n read_timeout = definition.parameters.get(\"read_timeout\", None)\n validate_timeouts(request_timeout, read_timeout)\n \n site_name = definition.parameters.get(\"site_api_name\", None)\n if site_name is None or site_name == \"\":\n msg = \"The site_name can not be empty\"\n raise ValueError(\"InvalidSiteName\", msg)\n elif \"http\" in site_name:\n msg = (\n \"The site name is not the full URL it should be the \",\n \"API Name of the site like 'my_example_site'\",\n )\n raise ValueError(\"InvalidSiteName\", msg)\n elif \" \" in site_name:\n msg = (\n \"The site name should be the API Name of the site like \",\n \"not the Display Name. Example would be 'my_site_name' instead of \",\n \"My Site Name\",\n )\n raise ValueError(\"InvalidSiteName\", msg)\n\n # Catchup Opts\n twenty_hour_catchup = definition.parameters.get('twenty_hour_catchup', None)\n disable_catchup = definition.parameters.get('disable_catchup', None)\n if twenty_hour_catchup and disable_catchup is True:\n raise ValueError(f\"Catch up values are mutually exclusive\")\n \n pass\n\n\ndef collect_events(helper, ew):\n start = timer()\n loglevel = helper.get_log_level()\n # Proxy setting configuration\n # proxy_settings = helper.get_proxy()\n global_email = helper.get_global_setting(\"email\")\n global_api_token = helper.get_global_setting(\"api_token\")\n global_corp_api_name = helper.get_global_setting(\"corp_api_name\")\n api_host = \"https://dashboard.signalsciences.net\"\n helper.log_info(\"email: %s\" % global_email)\n helper.log_info(\"corp: %s\" % global_corp_api_name)\n \n # Request / Read Timeouts\n request_timeout = float(helper.get_arg(\"request_timeout\"))\n read_timeout = float(helper.get_arg('read_timeout'))\n helper.log_info(f\"request configuration is: request:{request_timeout}, read: {read_timeout}\")\n \n # Config Declaration\n twenty_hour_catchup = helper.get_arg('twenty_hour_catchup')\n helper.log_info(f\"twenty four hour catchup is: {twenty_hour_catchup}\")\n \n disable_catchup = helper.get_arg('disable_catchup')\n helper.log_info(f\"disable catchup is: {disable_catchup}\")\n\n def pull_events(current_site, delta, key=None):\n site_name = current_site\n last_name = f\"events_last_until_time_{current_site}\"\n last_run_until = helper.get_check_point(last_name)\n helper.log_info(f\"last_run_until: {last_run_until}\")\n if last_run_until is None:\n (until_time, from_time) = get_from_and_until_times(\n helper, delta, five_min_offset=False\n )\n else:\n (until_time, from_time) = get_until_time(\n helper, last_run_until, delta, twenty_hour_catchup=twenty_hour_catchup, catchup_disabled=disable_catchup, five_min_offset=False\n )\n if from_time is None or from_time > until_time:\n 
helper.log_info(f\"{from_time} >= current now time, skipping run\")\n return\n if from_time >= until_time:\n helper.save_check_point(last_name, from_time)\n helper.log_info(\n f\"from_time {from_time} >= until_time {until_time}, skipping run\"\n )\n return\n helper.save_check_point(last_name, until_time)\n helper.log_info(\"SiteName: %s\" % site_name)\n\n helper.log_info(f\"Start Period: {time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(from_time))}\")\n helper.log_info(f\"End Period: {time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(until_time))}\")\n\n input_name = helper.get_input_stanza_names()\n single_name = \"\"\n\n if type(input_name) is dict and input_name > 1:\n helper.log_info(\"Multi instance mode\")\n for current_name in input_name:\n single_name = current_name\n else:\n helper.log_info(\"Single instance mode\")\n helper.log_info(\"Inputs: %s\" % input_name)\n helper.log_info(\"Inputs Num: %s\" % len(input_name))\n single_name = input_name\n helper.log_info(f\"single_name: {single_name}\")\n\n # Loop across all the data and output it in one big JSON object\n url = (\n f\"{api_host}/api/v0/corps/{global_corp_api_name}\"\n f\"/sites/{site_name}/activity?\"\n f\"from={from_time}&until={until_time}\"\n )\n helper.log_info(\"Pulling results from Events API\")\n config = Config(\n url=url,\n api_host=api_host,\n from_time=from_time,\n until_time=until_time,\n global_email=global_email,\n global_corp_api_name=global_corp_api_name,\n current_site=current_site,\n request_timeout=request_timeout,\n read_timeout=read_timeout,\n )\n config.headers = {\n \"Content-type\": \"application/json\",\n \"x-api-user\": global_email,\n \"x-api-token\": global_api_token,\n \"User-Agent\": config.user_agent_string,\n }\n all_events = get_results(\"Events\", helper, config)\n total_requests = len(all_events)\n helper.log_info(\"Total Events Pulled: %s\" % total_requests)\n write_start = timer()\n for current_event in all_events:\n helper.log_debug(current_event)\n if key is None:\n source_type = helper.get_sourcetype()\n helper.log_info(\"Concurrent Mode\")\n source_type_info = type(source_type)\n active_index = helper.get_output_index()\n index_info = type(active_index)\n single_name_info = type(single_name)\n current_event_info = type(current_event)\n helper.log_info(f\"source_type: {source_type}\")\n helper.log_info(f\"source_type_info: {source_type_info}\")\n helper.log_info(f\"index: {active_index}\")\n helper.log_info(f\"index_info: {index_info}\")\n helper.log_info(f\"single_name: {single_name}\")\n helper.log_info(f\"single_name_info: {single_name_info}\")\n helper.log_info(f\"current_event: {current_event}\")\n helper.log_info(f\"current_event_info: {current_event_info}\")\n event = helper.new_event(\n source=single_name,\n index=helper.get_output_index(),\n sourcetype=source_type,\n data=current_event,\n )\n else:\n indexes = helper.get_output_index()\n current_index = indexes[key]\n types = helper.get_sourcetype()\n source_type = types[key]\n single_name = single_name[0]\n helper.log_info(\"Sequential Mode\")\n helper.log_info(f\"source_type: {source_type}\")\n helper.log_info(f\"index: {current_index}\")\n helper.log_info(f\"single_name: {single_name}\")\n helper.log_info(f\"current_event: {current_event}\")\n event = helper.new_event(\n source=single_name,\n index=current_index,\n sourcetype=source_type,\n data=current_event,\n )\n\n try:\n ew.write_event(event)\n except Exception as e:\n raise e\n write_end = timer()\n write_time = write_end - write_start\n write_time_result = 
round(write_time, 2)\n helper.log_info(\"Total Event Output Time: %s seconds\" % write_time_result)\n\n # If multiple inputs configured it creates an array of values and the\n # script only gets called once per Input configuration\n\n all_sites = helper.get_arg(\"site_api_name\")\n time_deltas = helper.get_arg(\"interval\")\n helper.log_info(f\"interval: {time_deltas}\")\n if type(all_sites) is dict:\n helper.log_info(\"run_type: Sequential\")\n for active_input in all_sites:\n site = all_sites[active_input]\n current_delta = int(time_deltas[active_input])\n helper.log_info(\"site: %s\" % site)\n pull_events(key=active_input, current_site=site, delta=current_delta)\n helper.log_info(\"Finished Pulling Events for %s\" % site)\n else:\n helper.log_info(\"Run Type: Concurrent\")\n site = helper.get_arg(\"site_api_name\")\n helper.log_info(\"site: %s\" % site)\n pull_events(current_site=site, delta=int(time_deltas))\n helper.log_info(\"Finished Pulling Events for %s\" % site)\n end = timer()\n total_time = end - start\n time_result = round(total_time, 2)\n helper.log_info(\"Total Script Time: %s seconds\" % time_result)", "customized_options": [{"name": "site_api_name", "value": "jeremycx"}, {"name": "disable_catchup", "value": false}, {"name": "twenty_hour_catchup", "value": false}, {"name": "request_timeout", "value": "60"}, {"name": "read_timeout", "value": "60"}], "uuid": "294ad5bbdf92407b9a6785b46106152a", "sample_count": 0}, {"index": "default", "sourcetype": "sigsci-activity", "interval": "300", "use_external_validation": true, "streaming_mode_xml": true, "name": "SigsciActivity", "title": "SigsciActivity", "description": "", "type": "customized", "parameters": [{"name": "disable_catchup", "label": "Disable Catchup", "help_string": "Disables catch-up behavior. Events will always be ingested from now minus the delta (including an offset for the requests feed). Recommended to be left true. Default: True.", "required": false, "format_type": "checkbox", "default_value": true, "type": "checkbox", "value": true}, {"name": "twenty_hour_catchup", "label": "24 Hour Catchup", "help_string": "In the event the last time stored is >24Hours the TA will try and catch-up from exactly 24 hours ago, otherwise resets to now minus the delta. 'Disable Catchup' must be false in order to work. ", "required": false, "format_type": "checkbox", "default_value": false, "type": "checkbox", "value": false}, {"name": "request_timeout", "label": "Request Timeout", "help_string": "Configures Request Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required": true, "format_type": "text", "default_value": "60", "placeholder": "60", "type": "text", "value": "60"}, {"name": "read_timeout", "label": "Read Timeout", "help_string": "Configures Read Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required": true, "format_type": "text", "default_value": "60", "placeholder": "60", "type": "text", "value": "60"}], "data_inputs_options": [{"type": "customized_var", "name": "disable_catchup", "title": "Disable Catchup", "description": "Disables catch-up behavior. Events will always be ingested from now minus the delta (including an offset for the requests feed). Recommended to be left true. 
Default: True.", "required_on_edit": false, "required_on_create": false, "format_type": "checkbox", "default_value": true}, {"type": "customized_var", "name": "twenty_hour_catchup", "title": "24 Hour Catchup", "description": "In the event the last time stored is >24Hours the TA will try and catch-up from exactly 24 hours ago, otherwise resets to now minus the delta. 'Disable Catchup' must be false in order to work. ", "required_on_edit": false, "required_on_create": false, "format_type": "checkbox", "default_value": false}, {"type": "customized_var", "name": "request_timeout", "title": "Request Timeout", "description": "Configures Request Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required_on_edit": false, "required_on_create": true, "format_type": "text", "default_value": "60", "placeholder": "60"}, {"type": "customized_var", "name": "read_timeout", "title": "Read Timeout", "description": "Configures Read Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required_on_edit": false, "required_on_create": true, "format_type": "text", "default_value": "60", "placeholder": "60"}], "code": "# encoding = utf-8\nfrom timeit import default_timer as timer\nimport json\nimport time\nfrom datetime import datetime\nfrom sigsci_helper import get_from_and_until_times, Config, get_results, get_until_time, validate_timeouts\n\n\"\"\"\n IMPORTANT\n Edit only the validate_input and collect_events functions.\n Do not edit any other part in this file.\n This file is generated only once when creating the modular input.\n\"\"\"\n\n\n# def use_single_instance_mode():\n# return True\n\n\ndef validate_input(helper, definition):\n request_timeout = definition.parameters.get(\"request_timeout\", None)\n read_timeout = definition.parameters.get(\"read_timeout\", None)\n validate_timeouts(request_timeout, read_timeout) \n \n # Catchup Opts\n twenty_hour_catchup = definition.parameters.get('twenty_hour_catchup', None)\n disable_catchup = definition.parameters.get('disable_catchup', None)\n if twenty_hour_catchup and disable_catchup is True:\n raise ValueError(f\"Catch up values are mutually exclusive\")\n pass\n\n\ndef collect_events(helper, ew):\n start = timer()\n loglevel = helper.get_log_level()\n helper.set_log_level(loglevel)\n # Proxy setting configuration\n # proxy_settings = helper.get_proxy()\n global_email = helper.get_global_setting(\"email\")\n global_api_token = helper.get_global_setting(\"api_token\")\n global_corp_api_name = helper.get_global_setting(\"corp_api_name\")\n api_host = \"https://dashboard.signalsciences.net\"\n helper.log_info(\"email: %s\" % global_email)\n helper.log_info(\"corp: %s\" % global_corp_api_name)\n \n # Request / Read Timeouts\n request_timeout = float(helper.get_arg(\"request_timeout\"))\n read_timeout = float(helper.get_arg('read_timeout'))\n helper.log_info(f\"request configuration is: request:{request_timeout}, read: {read_timeout}\")\n \n # CatchUp Config Declaration\n twenty_hour_catchup = helper.get_arg('twenty_hour_catchup')\n helper.log_info(f\"twenty four hour catchup is: {twenty_hour_catchup}\")\n \n disable_catchup = helper.get_arg('disable_catchup')\n helper.log_info(f\"disable catchup is: {disable_catchup}\")\n\n def pull_events(delta, key=None):\n last_run_until = helper.get_check_point(\"activity_last_until_time\")\n helper.log_info(f\"last_run_until: {last_run_until}\")\n if last_run_until is None:\n (until_time, from_time) = 
get_from_and_until_times(\n helper, delta, five_min_offset=False\n )\n else:\n (until_time, from_time) = get_until_time(\n helper, last_run_until, delta, twenty_hour_catchup=twenty_hour_catchup, catchup_disabled=disable_catchup, five_min_offset=False\n )\n if from_time is None:\n helper.log_info(f\"{last_run_until} >= current now time, skipping run\")\n return\n if from_time >= until_time:\n helper.save_check_point(\"activity_last_until_time\", from_time)\n helper.log_info(\n f\"from_time {from_time} >= until_time {until_time}, skipping run\"\n )\n return\n helper.save_check_point(\"activity_last_until_time\", until_time)\n\n helper.log_info(f\"Start Period: {time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(from_time))}\")\n helper.log_info(f\"End Period: {time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(until_time))}\")\n\n input_name = helper.get_input_stanza_names()\n single_name = \"\"\n\n # Compare the number of inputs; comparing a dict to an int raises a TypeError.\n if type(input_name) is dict and len(input_name) > 1:\n helper.log_info(\"Multi instance mode\")\n for current_name in input_name:\n single_name = current_name\n else:\n helper.log_info(\"Single instance mode\")\n helper.log_info(\"Inputs: %s\" % input_name)\n helper.log_info(\"Inputs Num: %s\" % len(input_name))\n single_name = input_name\n helper.log_info(f\"single_name: {single_name}\")\n\n # Loop across all the data and output it in one big JSON object\n url = (\n f\"{api_host}/api/v0/corps/{global_corp_api_name}\"\n f\"/activity?\"\n f\"from={from_time}&until={until_time}\"\n )\n config = Config(\n url=url,\n api_host=api_host,\n from_time=from_time,\n until_time=until_time,\n global_email=global_email,\n global_corp_api_name=global_corp_api_name,\n current_site=\"\",\n request_timeout=request_timeout,\n read_timeout=read_timeout,\n )\n config.headers = {\n \"Content-type\": \"application/json\",\n \"x-api-user\": global_email,\n \"x-api-token\": global_api_token,\n \"User-Agent\": config.user_agent_string,\n }\n helper.log_info(\"Pulling results from Corp Activity API\")\n all_events = get_results(\"Activity Events\", helper, config)\n total_requests = len(all_events)\n helper.log_info(\"Total Corp Activity Pulled: %s\" % total_requests)\n write_start = timer()\n for current_event in all_events:\n if key is None:\n source_type = helper.get_sourcetype()\n helper.log_info(\"Concurrent Mode\")\n source_type_info = type(source_type)\n active_index = helper.get_output_index()\n index_info = type(active_index)\n single_name_info = type(single_name)\n current_event_info = type(current_event)\n helper.log_info(f\"source_type: {source_type}\")\n helper.log_info(f\"source_type_info: {source_type_info}\")\n helper.log_info(f\"index: {active_index}\")\n helper.log_info(f\"index_info: {index_info}\")\n helper.log_info(f\"single_name: {single_name}\")\n helper.log_info(f\"single_name_info: {single_name_info}\")\n helper.log_info(f\"current_event: {current_event}\")\n helper.log_info(f\"current_event_info: {current_event_info}\")\n event = helper.new_event(\n source=single_name,\n index=helper.get_output_index(),\n sourcetype=source_type,\n data=current_event,\n )\n else:\n indexes = helper.get_output_index()\n current_index = indexes[key]\n types = helper.get_sourcetype()\n source_type = types[key]\n single_name = single_name[0]\n helper.log_info(\"Sequential Mode\")\n helper.log_info(f\"source_type: {source_type}\")\n helper.log_info(f\"index: {current_index}\")\n helper.log_info(f\"single_name: {single_name}\")\n helper.log_info(f\"current_event: {current_event}\")\n event = helper.new_event(\n 
source=single_name,\n index=current_index,\n sourcetype=source_type,\n data=current_event,\n )\n\n try:\n ew.write_event(event)\n except Exception as e:\n raise e\n write_end = timer()\n write_time = write_end - write_start\n write_time_result = round(write_time, 2)\n helper.log_info(f\"Total Corp Activity Output Time: {write_time_result} seconds\")\n\n # If multiple inputs configured it creates an array of values and the\n # script only gets called once per Input configuration\n time_deltas = helper.get_arg(\"interval\")\n helper.log_info(f\"interval: {time_deltas}\")\n if type(time_deltas) is dict:\n helper.log_info(\"run_type: Sequential\")\n for active_input in time_deltas:\n time_delta = time_deltas[active_input]\n time_delta = int(time_delta)\n helper.log_info(\"time_delta: %s\" % time_delta)\n pull_events(delta=time_delta, key=active_input)\n else:\n helper.log_info(\"Run Type: Concurrent\")\n helper.log_info(\"time_delta: %s\" % time_deltas)\n pull_events(delta=int(time_deltas))\n helper.log_info(\"Finished Pulling Corp Activity\")\n end = timer()\n total_time = end - start\n time_result = round(total_time, 2)\n helper.log_info(f\"Total Script Time: {time_result} seconds\")\n", "customized_options": [{"name": "disable_catchup", "value": true}, {"name": "twenty_hour_catchup", "value": false}, {"name": "request_timeout", "value": "60"}, {"name": "read_timeout", "value": "60"}], "uuid": "07b37d1943e942cf831c8ee85ffcb4a2", "sample_count": 0}, {"index": "default", "sourcetype": "sigsci-requests", "interval": "300", "use_external_validation": true, "streaming_mode_xml": true, "name": "SigsciRequests", "title": "SigsciRequests", "description": "", "type": "customized", "parameters": [{"name": "site_api_name", "label": "Site API Name", "help_string": "This is the API Name of the site to pull request data from. This should not be a URL.", "required": true, "format_type": "text", "default_value": "", "placeholder": "", "type": "text", "value": "jeremycx"}, {"name": "request_limit", "label": "Request Limit", "help_string": "The number of request objects returned in the array. Default: 100. Max: 1000", "required": true, "format_type": "text", "default_value": "1000", "placeholder": "", "type": "text", "value": "1000"}, {"name": "disable_catchup", "label": "Disable Catchup", "help_string": "Disables catch-up behavior. Events will always be ingested from now minus the delta (including an offset for the requests feed). Recommended to be left true. Default: True.", "required": false, "format_type": "checkbox", "default_value": true, "type": "checkbox", "value": true}, {"name": "twenty_hour_catchup", "label": "24 Hour Catchup", "help_string": "In the event the last time stored is >24 hours, the TA will try to catch up from exactly 24 hours ago; otherwise it resets to now minus the delta. 'Disable Catchup' must be False in order to work.", "required": false, "format_type": "checkbox", "default_value": false, "type": "checkbox", "value": false}, {"name": "attack_and_anomaly_signals_only", "label": "Attack & Anomaly Signals Only", "help_string": "Only retrieves requests that contain attack or anomaly signals. Please evaluate your signal configuration if there are overly inclusive signals creating excessive requests.", "required": false, "format_type": "checkbox", "default_value": false, "type": "checkbox", "value": false}, {"name": "request_timeout", "label": "Request Timeout", "help_string": "Configures Request Timeout for HTTP operations. 
Consider increasing if on a slow connection or pagination batches are large.", "required": true, "format_type": "text", "default_value": "60", "placeholder": "Request Timeout", "type": "text", "value": "60"}, {"name": "read_timeout", "label": "Read Timeout", "help_string": "Configures Read Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required": true, "format_type": "text", "default_value": "60", "placeholder": "", "type": "text", "value": "60"}], "data_inputs_options": [{"type": "customized_var", "name": "site_api_name", "title": "Site API Name", "description": "This is the API Name of the site to pull request data from. This should not be a URL.", "required_on_edit": false, "required_on_create": true, "format_type": "text", "default_value": "", "placeholder": ""}, {"type": "customized_var", "name": "request_limit", "title": "Request Limit", "description": "The number of request objects returned in the array. Default: 100. Max: 1000.", "required_on_edit": false, "required_on_create": true, "format_type": "text", "default_value": "1000", "placeholder": ""}, {"type": "customized_var", "name": "disable_catchup", "title": "Disable Catchup", "description": "Disables catch-up behavior. Events will always be ingested from now minus the delta (including an offset for the requests feed). Recommended to be left true. Default: True.", "required_on_edit": false, "required_on_create": false, "format_type": "checkbox", "default_value": true}, {"type": "customized_var", "name": "twenty_hour_catchup", "title": "24 Hour Catchup", "description": "In the event the last stored time is >24 hours old, the TA will try and catch up from exactly 24 hours ago; otherwise it resets to now minus the delta. 'Disable Catchup' must be False in order to work.", "required_on_edit": false, "required_on_create": false, "format_type": "checkbox", "default_value": false}, {"type": "customized_var", "name": "attack_and_anomaly_signals_only", "title": "Attack & Anomaly Signals Only", "description": "Only retrieves requests that contain attack or anomaly signals. Please evaluate your signal configuration if there are overly inclusive signals creating excessive requests.", "required_on_edit": false, "required_on_create": false, "format_type": "checkbox", "default_value": false}, {"type": "customized_var", "name": "request_timeout", "title": "Request Timeout", "description": "Configures Request Timeout for HTTP operations. Consider increasing if on a slow connection or pagination batches are large.", "required_on_edit": false, "required_on_create": true, "format_type": "text", "default_value": "60", "placeholder": "Request Timeout"}, {"type": "customized_var", "name": "read_timeout", "title": "Read Timeout", "description": "Configures Read Timeout for HTTP operations. 
Consider increasing if on a slow connection or pagination batches are large.", "required_on_edit": false, "required_on_create": true, "format_type": "text", "default_value": "60", "placeholder": ""}], "customized_options": [{"name": "site_api_name", "value": "jeremycx"}, {"name": "request_limit", "value": "1000"}, {"name": "disable_catchup", "value": true}, {"name": "twenty_hour_catchup", "value": false}, {"name": "attack_and_anomaly_signals_only", "value": false}, {"name": "request_timeout", "value": "60"}, {"name": "read_timeout", "value": "60"}], "code": "# encoding = utf-8\nfrom timeit import default_timer as timer\nimport time\nfrom datetime import datetime, timezone, timedelta\nfrom sigsci_helper import get_from_and_until_times, Config, get_results, get_until_time, validate_timeouts\n\n\"\"\"\n    IMPORTANT\n    Edit only the validate_input and collect_events functions.\n    Do not edit any other part in this file.\n    This file is generated only once when creating the modular input.\n\"\"\"\n\n# def use_single_instance_mode():\n#     return True\n\ndef validate_input(helper, definition):\n    # Validate before casting: int(None) would raise a TypeError and mask\n    # the friendlier error message below.\n    request_limit = definition.parameters.get(\"request_limit\", None)\n    if request_limit is None or request_limit == \"\":\n        raise ValueError('The request limit cannot be blank')\n    request_limit = int(request_limit)\n    if request_limit <= 0:\n        raise ValueError('The request limit must be greater than 0')\n    if request_limit > 1000:\n        raise ValueError('The request limit cannot be greater than 1000')\n\n    # Read Timeout passed to send_http_request. Type: float.\n    # https://docs.splunk.com/Documentation/AddonBuilder/4.1.4/UserGuide/PythonHelperFunctions\n    # We do this per input module as splunk provides no way to validate global configuration arguments :')\n    request_timeout = definition.parameters.get(\"request_timeout\", None)\n    read_timeout = definition.parameters.get(\"read_timeout\", None)\n    validate_timeouts(request_timeout, read_timeout)\n\n    twenty_hour_catchup = definition.parameters.get('twenty_hour_catchup', None)\n    disable_catchup = definition.parameters.get('disable_catchup', None)\n    if twenty_hour_catchup and disable_catchup is True:\n        raise ValueError(\"'24 Hour Catchup' and 'Disable Catchup' are mutually exclusive\")\n\n    site_name = definition.parameters.get(\"site_api_name\", None)\n    if site_name is None or site_name == \"\":\n        msg = \"The site_name cannot be empty\"\n        raise ValueError(\"InvalidSiteName\", msg)\n    elif \"http\" in site_name:\n        msg = (\n            \"The site name is not the full URL; it should be the \"\n            \"API Name of the site, like 'my_example_site'\"\n        )\n        raise ValueError(\"InvalidSiteName\", msg)\n    elif \" \" in site_name:\n        msg = (\n            \"The site name should be the API Name of the site, \"\n            \"not the Display Name. 
For example, 'my_site_name' instead of \"\n            \"'My Site Name'\"\n        )\n        raise ValueError(\"InvalidSiteName\", msg)\n\n\ndef collect_events(helper, ew):\n    start = timer()\n    loglevel = helper.get_log_level()\n    helper.set_log_level(loglevel)\n    # Proxy setting configuration\n    # proxy_settings = helper.get_proxy()\n    api_host = \"https://dashboard.signalsciences.net\"\n    global_email = helper.get_global_setting(\"email\")\n    global_api_token = helper.get_global_setting(\"api_token\")\n    global_corp_api_name = helper.get_global_setting(\"corp_api_name\")\n    helper.log_info(\"email: %s\" % global_email)\n    helper.log_info(\"corp: %s\" % global_corp_api_name)\n\n    # Request / Read Timeouts\n    request_timeout = float(helper.get_arg(\"request_timeout\"))\n    read_timeout = float(helper.get_arg('read_timeout'))\n    helper.log_info(f\"timeout configuration is: request: {request_timeout}, read: {read_timeout}\")\n\n    # Config declaration.\n    twenty_hour_catchup = helper.get_arg('twenty_hour_catchup')\n    helper.log_info(f\"twenty four hour catchup is: {twenty_hour_catchup}\")\n\n    disable_catchup = helper.get_arg('disable_catchup')\n    helper.log_info(f\"disable catchup is: {disable_catchup}\")\n\n    attack_and_anomaly_signals_only = helper.get_arg('attack_and_anomaly_signals_only')\n    helper.log_info(f\"attack signals only is: {attack_and_anomaly_signals_only}\")\n\n    def pull_requests(helper, current_site, delta, key=None):\n        site_name = current_site\n        last_name = f\"requests_last_until_time_{current_site}\"\n        last_run_until = helper.get_check_point(last_name)\n        request_limit = helper.get_arg('request_limit')\n        helper.log_info(f\"request limit: {request_limit}\")\n\n        if last_run_until is None:\n            helper.log_info(\"no last_run_time found in checkpoint state\")\n            helper.log_debug(\"get_from_until\")\n            until_time, from_time = get_from_and_until_times(\n                helper, delta, five_min_offset=True\n            )\n        else:\n            helper.log_info(f\"last_run_until found in state: {last_run_until}\")\n            helper.log_debug(\"get_until\")\n            until_time, from_time = get_until_time(\n                helper, last_run_until, delta, twenty_hour_catchup, disable_catchup, five_min_offset=True\n            )\n\n        if from_time is None:\n            helper.log_info(f\"{last_run_until} >= current now time, skipping run\")\n            return\n\n        if from_time >= until_time:\n            helper.save_check_point(last_name, from_time)\n            helper.log_info(\n                f\"from_time {from_time} >= until_time {until_time}, skipping run\"\n            )\n            return\n\n        helper.log_info(\"SiteName: %s\" % site_name)\n        helper.log_info(f\"Start Period: {time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(from_time))}\")\n        helper.log_info(f\"End Period: {time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime(until_time))}\")\n\n        input_name = helper.get_input_stanza_names()\n        single_name = \"\"\n\n        # get_input_stanza_names returns a dict of stanza names when more than one input is configured\n        if type(input_name) is dict and len(input_name) > 1:\n            helper.log_info(\"Multi instance mode\")\n            for current_name in input_name:\n                single_name = current_name\n        else:\n            helper.log_info(\"Single instance mode\")\n            helper.log_info(\"Inputs: %s\" % input_name)\n            helper.log_info(\"Inputs Num: %s\" % len(input_name))\n            single_name = input_name\n            helper.log_info(f\"single_name: {single_name}\")\n\n        # Loop across all the data and output it in one big JSON object\n        url = (\n            f\"{api_host}/api/v0/corps/{global_corp_api_name}\"\n            f\"/sites/{site_name}/feed/requests\"\n            f\"?limit={request_limit}\"\n            f\"&from={from_time}&until={until_time}\"\n        )\n        if attack_and_anomaly_signals_only:\n            attack_signals = [\n                \"USERAGENT\",\n                \"AWS-SSRF\",\n                \"BACKDOOR\",\n                \"CMDEXE\",\n                \"SQLI\",\n                
\"TRAVERSAL\",\n \"XSS\",\n \"XXE\"\n ]\n anomaly_signals = [\n \"2FA-DISABLED\", \"2FA-CHANGED\", \"ABNORMALPATH\", \"ADDRESS-CHANGED\", \"ALLOWED\",\n \"BHH\", \"BLOCKED\", \"BODY-PARSER-EVASION\", \"CODEINJECTION\", \"COMPRESSED\",\n \"CC-VAL-ATTEMPT\", \"CC-VAL-FAILURE\", \"CC-VAL-SUCCESS\", \"CVE-2017-5638\",\n \"CVE-2017-7269\", \"CVE-2017-9805\", \"CVE-2018-11776\", \"CVE-2018-15961\",\n \"CVE-2018-9206\", \"CVE-2019-0192\", \"CVE-2019-0193\", \"CVE-2019-0232\",\n \"CVE-2019-11580\", \"CVE-2019-14234\", \"CVE-2019-16759\", \"CVE-2019-2725\",\n \"CVE-2019-3396\", \"CVE-2019-3398\", \"CVE-2019-5418\", \"CVE-2019-6340\",\n \"CVE-2019-8394\", \"CVE-2019-8451\", \"CVE-2021-26084\", \"CVE-2021-26855\",\n \"CVE-2021-40438\", \"CVE-2021-44228\", \"CVE-2021-44228-STRICT\",\n \"CVE-2022-22963\", \"CVE-2022-22965\", \"CVE-2022-26134\", \"CVE-2022-42889\",\n \"CVE-2023-34362\", \"CVE-2023-38218\", \"DATACENTER\", \"DOUBLEENCODING\",\n \"EMAIL-CHANGED\", \"EMAIL-VALIDATION\", \"FORCEFULBROWSING\", \"GC-VAL-ATTEMPT\",\n \"GC-VAL-FAILURE\", \"GC-VAL-SUCCESS\", \"GRAPHQL-API\", \"GRAPHQL-DUPLICATE-VARIABLES\",\n \"GRAPHQL-IDE\", \"GRAPHQL-INTROSPECTION\", \"GRAPHQL-DEPTH\",\n \"GRAPHQL-MISSING-REQUIRED-OPERATION-NAME\",\n \"GRAPHQL-UNDEFINED-VARIABLES\", \"HTTP403\", \"HTTP404\", \"HTTP429\",\n \"HTTP4XX\", \"HTTP500\", \"HTTP503\", \"HTTP5XX\", \"IMPOSTOR\", \"INFO-VIEWED\",\n \"INSECURE-AUTH\", \"NOTUTF8\", \"INVITE-FAILURE\", \"INVITE-ATTEMPT\",\n \"INVITE-SUCCESS\", \"JSON-ERROR\", \"KBA-CHANGED\", \"LOGINATTEMPT\",\n \"LOGINDISCOVERY\", \"LOGINFAILURE\", \"LOGINSUCCESS\", \"MALFORMED-DATA\",\n \"SANS\", \"MESSAGE-SENT\", \"NO-CONTENT-TYPE\", \"NOUA\", \"NULLBYTE\",\n \"OOB-DOMAIN\", \"PW-CHANGED\", \"PW-RESET-ATTEMPT\", \"PW-RESET-FAILURE\",\n \"PW-RESET-SUCCESS\", \"PRIVATEFILE\", \"rate-limit\", \"REGATTEMPT\", \"REGFAILURE\",\n \"REGSUCCESS\", \"RSRC-ID-ENUM-ATTEMPT\", \"RSRC-ID-ENUM-FAILURE\",\n \"RSRC-ID-ENUM-SUCCESS\", \"RESPONSESPLIT\", \"SCANNER\", \"SIGSCI-IP\",\n \"TORNODE\", \"WRONG-API-CLIENT\", \"USER-ID-ENUM-ATTEMPT\",\n \"USER-ID-ENUM-FAILURE\", \"USER-ID-ENUM-SUCCESS\", \"WEAKTLS\", \"XML-ERROR\"\n ]\n attack_tags = \",\".join(attack_signals)\n anomaly_tags = \",\".join(anomaly_signals)\n url = f\"{url}&tags={attack_tags},{anomaly_tags}\"\n config = Config(\n url=url,\n api_host=api_host,\n from_time=from_time,\n until_time=until_time,\n global_email=global_email,\n global_corp_api_name=global_corp_api_name,\n current_site=current_site,\n request_timeout=request_timeout,\n read_timeout=read_timeout,\n )\n config.headers = {\n \"Content-type\": \"application/json\",\n \"x-api-user\": global_email,\n \"x-api-token\": global_api_token,\n \"User-Agent\": config.user_agent_string,\n }\n\n all_requests = get_results(\"Requests\", helper, config)\n\n total_requests = len(all_requests)\n helper.log_info(\"Total Requests Pulled: %s\" % total_requests)\n if total_requests == 0:\n helper.save_check_point(last_name, until_time)\n helper.log_info(\n f\"No events to write, saving checkpoint to value:{until_time}\"\n )\n write_start = timer()\n event_count = 0\n for current_event in all_requests:\n if key is None:\n source_type = helper.get_sourcetype()\n event = helper.new_event(\n source=single_name,\n index=helper.get_output_index(),\n sourcetype=source_type,\n data=current_event,\n )\n else:\n indexes = helper.get_output_index()\n current_index = indexes[key]\n types = helper.get_sourcetype()\n source_type = types[key]\n single_name = single_name[0]\n event = helper.new_event(\n 
source=single_name,\n                    index=current_index,\n                    sourcetype=source_type,\n                    data=current_event,\n                )\n\n            try:\n                ew.write_event(event)\n                event_count += 1  # increment the count for successful events to not spam debug.\n            except Exception as e:\n                helper.log_error(f\"error writing event: {e}\")\n                helper.log_error(event)\n                raise e\n        if event_count != 0:  # We save the checkpoint earlier on 0 events.\n            helper.log_info(f\"{event_count} events written, saving checkpoint: {until_time}\")\n            helper.save_check_point(last_name, until_time)\n        write_end = timer()\n        write_time = write_end - write_start\n        write_time_result = round(write_time, 2)\n        helper.log_info(\"Total Event Output Time: %s seconds\" % write_time_result)\n\n    # If multiple inputs are configured, helper.get_arg returns a dict of values\n    # and the script only gets called once per input configuration\n    all_sites = helper.get_arg(\"site_api_name\")\n    time_deltas = helper.get_arg(\"interval\")\n    helper.log_info(f\"interval: {time_deltas}\")\n\n    if type(all_sites) is dict:\n        helper.log_info(\"run_type: Sequential\")\n        for active_input, site in all_sites.items():\n            time_delta = int(time_deltas[active_input])\n            helper.log_info(\"site: %s\" % site)\n            pull_requests(helper, key=active_input, current_site=site, delta=time_delta)\n            helper.log_info(\"Finished Pulling Requests for %s\" % site)\n    else:\n        helper.log_info(\"Run Type: Concurrent\")\n        site = helper.get_arg(\"site_api_name\")\n        helper.log_info(\"site: %s\" % site)\n        pull_requests(helper, current_site=site, delta=int(time_deltas))\n        helper.log_info(\"Finished Pulling Requests for %s\" % site)\n    end = timer()\n    total_time = end - start\n    time_result = round(total_time, 2)\n    helper.log_info(\"Total Script Time: %s seconds\" % time_result)", "uuid": "aaaeb391da9043e1819408033d9db708", "sample_count": "18344"}]}, "field_extraction_builder": {"sigsci-event": {"is_parsed": true, "data_format": "json"}, "sigsci-activity": {"is_parsed": true, "data_format": "json"}, "sigsci-requests": {"is_parsed": true, "data_format": "json"}}, "global_settings_builder": {"global_settings": {"proxy_settings": {"proxy_type": "http"}, "log_settings": {"log_level": "DEBUG"}, "customized_settings": [{"required": true, "name": "email", "label": "E-mail", "default_value": "", "placeholder": "example@example.com", "help_string": "This is the e-mail username of the user who has the correct permissions for the app to pull the data.", "type": "text", "format_type": "text", "value": "jcocks+sigsci@fastly.com"}, {"required": true, "name": "corp_api_name", "label": "Corp", "default_value": "", "placeholder": "", "help_string": "This is the API name of your corp.", "type": "text", "format_type": "text", "value": "jcocks"}, {"required": true, "name": "api_token", "label": "API Token", "placeholder": "", "default_value": "", "help_string": "This is the API Token of the user who has the correct permissions. 
The API Token is tied to the username.", "type": "password", "format_type": "password", "value": ""}]}}, "sourcetype_builder": {"sigsci-requests": {"metadata": {"event_count": 0, "data_input_name": "SigsciRequests", "extractions_count": 0, "cims_count": 0}}, "sigsci-event": {"metadata": {"event_count": 0, "data_input_name": "SigsciEvent", "extractions_count": 0, "cims_count": 0}}, "sigsci-activity": {"metadata": {"event_count": 0, "data_input_name": "SigsciActivity", "extractions_count": 0, "cims_count": 0}}}, "validation": {"validators": ["best_practice_validation", "data_model_mapping_validation", "field_extract_validation", "app_cert_validation"], "status": "job_finished", "validation_id": "v_1710347946_44", "progress": 1.0}} \ No newline at end of file diff --git a/splunkbase_prepped_file/sigsci_TA_for_splunk-1.0.38.spl b/splunkbase_prepped_file/sigsci_TA_for_splunk-1.0.38.spl index e614ece..5190f29 100644 Binary files a/splunkbase_prepped_file/sigsci_TA_for_splunk-1.0.38.spl and b/splunkbase_prepped_file/sigsci_TA_for_splunk-1.0.38.spl differ