diff --git a/.github/workflows/authors.yml b/.github/workflows/authors.yml index 3fee4c5a2fde..5c4702a13858 100644 --- a/.github/workflows/authors.yml +++ b/.github/workflows/authors.yml @@ -8,20 +8,16 @@ jobs: name: New Author Check runs-on: ubuntu-latest steps: + - name: Checkout PR code + uses: actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: 0 - run: sudo apt -y install git - - run: git clone https://github.com/${{ github.repository }} - - run: git remote add author ${{ github.event.pull_request.head.repo.html_url }} - working-directory: suricata - - run: git fetch author - working-directory: suricata - - run: git checkout author/${{ github.event.pull_request.head.ref }} - working-directory: suricata - name: Export known authors from master branch - run: git log --format="%an <%ae>" origin/master | sort | uniq > ../authors.txt - working-directory: suricata + run: git log --format="%an <%ae>" origin/master | sort | uniq > authors.txt - name: Export authors from new commits - run: git log --format="%an <%ae>" origin/${GITHUB_BASE_REF}... | sort | uniq > ../commit-authors.txt - working-directory: suricata + run: git log --format="%an <%ae>" origin/${GITHUB_BASE_REF}... | sort | uniq > commit-authors.txt - name: Check new authors run: | touch new-authors.txt diff --git a/.github/workflows/builds.yml b/.github/workflows/builds.yml index 62748b10e985..ccb119bbfec6 100644 --- a/.github/workflows/builds.yml +++ b/.github/workflows/builds.yml @@ -28,9 +28,6 @@ env: # could cause some steps to fail. RUST_VERSION_KNOWN: "1.70.0" - # The minimum version of Rust supported. - RUST_VERSION_MIN: "1.63.0" - jobs: prepare-deps: @@ -562,172 +559,6 @@ jobs: - run: suricata-update -V - run: suricatasc -h - centos-stream8: - name: CentOS Stream 8 - runs-on: ubuntu-latest - container: quay.io/centos/centos:stream8 - needs: [prepare-deps, debian-12-dist] - steps: - # Cache Rust stuff. 
- - name: Cache cargo registry - uses: actions/cache@v3.3.1 - with: - path: ~/.cargo - key: ${{ github.job }}-cargo - - - name: Cache RPMs - uses: actions/cache@v3.3.1 - with: - path: /var/cache/dnf - key: ${{ github.job }}-dnf - - run: echo "keepcache=1" >> /etc/dnf/dnf.conf - - - name: Install system packages - run: | - dnf -y install dnf-plugins-core epel-release - dnf config-manager --set-enabled powertools - dnf -y install \ - autoconf \ - automake \ - diffutils \ - numactl-devel \ - dpdk-devel \ - file-devel \ - gcc \ - gcc-c++ \ - git \ - jansson-devel \ - jq \ - lua-devel \ - libtool \ - libyaml-devel \ - libnfnetlink-devel \ - libnetfilter_queue-devel \ - libnet-devel \ - libcap-ng-devel \ - libevent-devel \ - libmaxminddb-devel \ - libpcap-devel \ - libtool \ - lz4-devel \ - make \ - nss-devel \ - pcre2-devel \ - pkgconfig \ - python3-devel \ - python3-yaml \ - rust-toolset \ - sudo \ - which \ - zlib-devel - - name: Download suricata.tar.gz - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a - with: - name: dist - - run: tar zxvf suricata-*.tar.gz --strip-components=1 - - name: ./configure - run: CFLAGS="${DEFAULT_CFLAGS}" ./configure - - run: make -j2 - - run: make install - - run: make install-conf - - run: suricatasc -h - - run: suricata-update -V - - name: Check if Suricata-Update example configuration files are installed - run: | - test -e /usr/local/lib/suricata/python/suricata/update/configs/disable.conf - test -e /usr/local/lib/suricata/python/suricata/update/configs/drop.conf - test -e /usr/local/lib/suricata/python/suricata/update/configs/enable.conf - test -e /usr/local/lib/suricata/python/suricata/update/configs/modify.conf - test -e /usr/local/lib/suricata/python/suricata/update/configs/threshold.in - test -e /usr/local/lib/suricata/python/suricata/update/configs/update.yaml - - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a - with: - name: prep - path: prep - - run: tar xf 
prep/suricata-verify.tar.gz - - run: python3 ./suricata-verify/run.py -q --debug-failed - - run: suricata-update -V - - run: suricatasc -h - - centos-7: - name: CentOS 7 - runs-on: ubuntu-latest - container: centos:7 - needs: [prepare-deps, debian-12-dist] - steps: - - name: Cache ~/.cargo - uses: actions/cache@v3.3.1 - with: - path: ~/.cargo - key: ${{ github.job }}-cargo - - - name: Cache RPMs - uses: actions/cache@v3.3.1 - with: - path: /var/cache/yum - key: ${{ github.job }}-yum - - run: echo "keepcache=1" >> /etc/yum.conf - - - name: Install system dependencies - run: | - yum -y install epel-release - yum -y install \ - autoconf \ - automake \ - cargo \ - diffutils \ - file-devel \ - gcc \ - gcc-c++ \ - jansson-devel \ - jq \ - lua-devel \ - libtool \ - libyaml-devel \ - libnfnetlink-devel \ - libnetfilter_queue-devel \ - libnet-devel \ - libcap-ng-devel \ - libevent-devel \ - libmaxminddb-devel \ - libpcap-devel \ - lz4-devel \ - make \ - nss-devel \ - pcre2-devel \ - pkgconfig \ - python36-PyYAML \ - rust \ - sudo \ - which \ - zlib-devel - - name: Download suricata.tar.gz - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a - with: - name: dist - - run: tar zxvf suricata-*.tar.gz --strip-components=1 - # This isn't really needed as we are building from a prepared - # package, but some package managers like RPM and Debian like to - # run this command even on prepared packages, so make sure it - # works. 
- - name: Test autoreconf - run: autoreconf -fv --install - - run: CFLAGS="${DEFAULT_CFLAGS}" ./configure - - run: make -j2 - - run: make install - - run: make install-conf - - run: make distcheck - - run: make clean - - run: make -j2 - - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a - with: - name: prep - path: prep - - run: tar xf prep/suricata-verify.tar.gz - - run: python3 ./suricata-verify/run.py -q --debug-failed - - run: suricata-update -V - - run: suricatasc -h - fedora-38-sv-codecov: name: Fedora 38 (Suricata Verify codecov) runs-on: ubuntu-latest @@ -1352,6 +1183,57 @@ jobs: else exit 0 fi + almalinux-9-minimal-recommended-dependecies: + name: AlmaLinux 9 (Minimal/Recommended Build) + runs-on: ubuntu-latest + container: almalinux:9 + needs: [prepare-deps] + steps: + # Cache Rust stuff. + - name: Cache cargo registry + uses: actions/cache@v3.3.1 + with: + path: ~/.cargo + key: ${{ github.job }}-cargo + + - name: Cache RPMs + uses: actions/cache@v3.3.1 + with: + path: /var/cache/dnf + key: ${{ github.job }}-dnf + - run: echo "keepcache=1" >> /etc/dnf/dnf.conf + + - name: Determine number of CPUs + run: echo CPUS=$(nproc --all) >> $GITHUB_ENV + + - name: Install git dependencies + run: | + dnf -y install \ + sudo \ + git \ + libtool \ + which + + - name: Install Almalinux 9 extra repositories + run : | + dnf -y update + dnf -y install dnf-plugins-core epel-release + dnf config-manager --set-enabled crb + + - uses: actions/checkout@v3.5.3 + - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a + with: + name: prep + path: prep + - run: tar xf prep/libhtp.tar.gz + - run: ./autogen.sh + + - name: Install minimal dependencies + run: ./scripts/docs-almalinux9-minimal-build.sh + + - run: CFLAGS="${DEFAULT_CFLAGS}" ./configure + - run: make -j ${{ env.CPUS }} + - run: ./src/suricata --build-info # check if we can run Suricata ubuntu-22-04-cov-ut: name: Ubuntu 22.04 (unittests coverage) @@ -1473,7 +1355,9 @@ jobs: 
ubuntu-22-04-cov-fuzz: name: Ubuntu 22.04 (fuzz corpus coverage) runs-on: ubuntu-latest - container: ubuntu:22.04 + container: + image: ubuntu:22.04 + options: --privileged needs: [prepare-deps, prepare-cbindgen] steps: - name: Cache ~/.cargo @@ -1519,6 +1403,7 @@ jobs: parallel \ python3-yaml \ software-properties-common \ + sudo \ zlib1g \ zlib1g-dev \ exuberant-ctags \ @@ -1542,6 +1427,11 @@ jobs: cp prep/cbindgen $HOME/.cargo/bin chmod 755 $HOME/.cargo/bin/cbindgen echo "$HOME/.cargo/bin" >> $GITHUB_PATH + - name: Fix kernel mmap rnd bits + # Asan in llvm 14 provided in ubuntu 22.04 is incompatible with + # high-entropy ASLR in much newer kernels that GitHub runners are + # using leading to random crashes: https://github.com/actions/runner-images/issues/9491 + run: sudo sysctl vm.mmap_rnd_bits=28 - run: ./autogen.sh - run: ./configure --with-gnu-ld --enable-fuzztargets --disable-shared --enable-gccprotect env: @@ -1717,7 +1607,9 @@ jobs: ubuntu-22-04-debug-validation: name: Ubuntu 22.04 (Debug Validation) runs-on: ubuntu-22.04 - container: ubuntu:22.04 + container: + image: ubuntu:22.04 + options: --privileged needs: [prepare-deps, prepare-cbindgen] steps: @@ -1762,6 +1654,7 @@ jobs: python3-yaml \ rustc \ software-properties-common \ + sudo \ zlib1g \ zlib1g-dev \ exuberant-ctags @@ -1777,6 +1670,11 @@ jobs: cp prep/cbindgen $HOME/.cargo/bin chmod 755 $HOME/.cargo/bin/cbindgen echo "$HOME/.cargo/bin" >> $GITHUB_PATH + - name: Fix kernel mmap rnd bits + # Asan in llvm 14 provided in ubuntu 22.04 is incompatible with + # high-entropy ASLR in much newer kernels that GitHub runners are + # using leading to random crashes: https://github.com/actions/runner-images/issues/9491 + run: sudo sysctl vm.mmap_rnd_bits=28 - run: ./autogen.sh - run: ./configure --enable-debug-validation env: @@ -1944,6 +1842,45 @@ jobs: - run: make -j2 - run: ./src/suricata --build-info | grep -E "Netmap support:\s+yes" + ubuntu-22-04-minimal-recommended-build: + name: Ubuntu 22.04 
(Minimal/Recommended Build) + needs: [prepare-deps, prepare-cbindgen] + runs-on: ubuntu-22.04 + steps: + # Cache Rust stuff. + - name: Cache cargo registry + uses: actions/cache@v3.3.1 + with: + path: ~/.cargo/registry + key: cargo-registry + + - name: Determine number of CPUs + run: echo CPUS=$(nproc --all) >> $GITHUB_ENV + + - name: Install git dependencies + run: | + sudo apt update + sudo apt -y install \ + git \ + libtool + + - uses: actions/checkout@v3.5.3 + - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a + with: + name: prep + path: prep + - run: tar xf prep/libhtp.tar.gz + - run: tar xf prep/suricata-update.tar.gz + - run: tar xf prep/suricata-verify.tar.gz + - run: ./autogen.sh + + - name: Install minimal dependencies + run: ./scripts/docs-ubuntu-debian-minimal-build.sh + + - run: CFLAGS="${DEFAULT_CFLAGS}" ./configure + - run: make -j ${{ env.CPUS }} + - run: ./src/suricata --build-info # check if we can run Suricata + ubuntu-22-04-dpdk-build: name: Ubuntu 22.04 (DPDK Build) runs-on: ubuntu-22.04 @@ -2114,7 +2051,8 @@ jobs: - run: CFLAGS="${DEFAULT_CFLAGS}" ./configure --enable-unittests - run: make -j2 - run: make check - - run: make -j2 distcheck + # -j2 caused random failures during cargo vendor + - run: make distcheck env: DISTCHECK_CONFIGURE_FLAGS: "--enable-unittests --enable-debug --enable-lua --enable-geoip --enable-profiling --enable-profiling-locks --enable-dpdk" - run: test -e doc/userguide/suricata.1 @@ -2263,14 +2201,14 @@ jobs: texlive-latex-extra \ zlib1g \ zlib1g-dev - - name: Install Rust - run: curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain ${RUST_VERSION_MIN} -y - - run: echo "$HOME/.cargo/bin" >> $GITHUB_PATH - uses: actions/checkout@v3.5.3 - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a with: name: prep path: prep + - name: Install Rust + run: curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain $(grep rust-version rust/Cargo.toml.in|sed 's/\"//g'|awk 
'{print $3}') -y + - run: echo "$HOME/.cargo/bin" >> $GITHUB_PATH - name: Setup cbindgen run: | mkdir -p $HOME/.cargo/bin @@ -2315,7 +2253,6 @@ jobs: ccache \ curl \ git \ - gosu \ jq \ libpcre2-dev \ libpcap-dev \ @@ -2390,7 +2327,6 @@ jobs: ccache \ curl \ git \ - gosu \ jq \ libpcre2-dev \ libpcap-dev \ @@ -2465,7 +2401,9 @@ jobs: libnet \ libtool \ libyaml \ + pyyaml \ lua \ + pcre2 \ pkg-config \ python \ rust \ @@ -2473,7 +2411,6 @@ jobs: - name: Install cbindgen run: cargo install --debug --version 0.24.3 cbindgen - run: echo "$HOME/.cargo/bin" >> $GITHUB_PATH - - run: pip3 install PyYAML - uses: actions/checkout@v3.5.3 - name: Downloading prep archive uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a @@ -2483,8 +2420,8 @@ jobs: - run: tar xvf prep/libhtp.tar.gz - run: tar xvf prep/suricata-update.tar.gz - run: ./autogen.sh - - run: CFLAGS="${DEFAULT_CFLAGS}" ./configure --enable-unittests - - run: make -j2 + - run: CPATH="$HOMEBREW_PREFIX/include:$CPATH" LIBRARY_PATH="$HOMEBREW_PREFIX/lib:$LIBRARY_PATH" PATH="/opt/homebrew/opt/libtool/libexec/gnubin:$PATH" CFLAGS="${DEFAULT_CFLAGS}" ./configure --enable-unittests --prefix="$HOME/.local/" + - run: CPATH="$HOMEBREW_PREFIX/include:$CPATH" LIBRARY_PATH="$HOMEBREW_PREFIX/lib:$LIBRARY_PATH" PATH="/opt/homebrew/opt/libtool/libexec/gnubin:$PATH" CFLAGS="${DEFAULT_CFLAGS}" make -j2 # somehow it gets included by some C++ stdlib header (case unsensitive) - run: rm libhtp/VERSION && make check - run: tar xf prep/suricata-verify.tar.gz diff --git a/.github/workflows/cifuzz.yml b/.github/workflows/cifuzz.yml index 8a55272cccbd..c6eecc3cf812 100644 --- a/.github/workflows/cifuzz.yml +++ b/.github/workflows/cifuzz.yml @@ -14,6 +14,12 @@ jobs: matrix: sanitizer: [address, undefined] steps: + - name: Clear unnecessary files + run: | + df + sudo apt clean + sudo rm -rf /usr/share/dotnet/ /usr/share/swift /usr/local/.ghcup/ /usr/local/share/powershell /usr/local/share/chromium /usr/local/lib/android 
/usr/local/lib/node_modules + df - name: Build Fuzzers (${{ matrix.sanitizer }}) uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master with: diff --git a/.github/workflows/commits.yml b/.github/workflows/commits.yml index 04bbb3fdf96b..12368cb54cfb 100644 --- a/.github/workflows/commits.yml +++ b/.github/workflows/commits.yml @@ -78,6 +78,9 @@ jobs: - run: /usr/bin/git config --global --add safe.directory /__w/suricata/suricata - run: git fetch - run: git clone https://github.com/OISF/libhtp -b 0.5.x + - name: Checking Cherry-pick lines + run: | + ./qa/validate-cp.sh "${GITHUB_BASE_REF}" - name: Building all commits run: | echo "Building commits from ${GITHUB_BASE_REF}." diff --git a/.github/workflows/formatting.yml b/.github/workflows/formatting.yml index ca7b018e36df..03a8e81169f5 100644 --- a/.github/workflows/formatting.yml +++ b/.github/workflows/formatting.yml @@ -21,9 +21,9 @@ jobs: # Checking for correct formatting of branch for C code changes check-formatting: - name: Formatting Check (clang 9) - runs-on: ubuntu-20.04 - container: ubuntu:20.04 + name: Formatting Check (clang 14) + runs-on: ubuntu-22.04 + container: ubuntu:22.04 continue-on-error: false steps: @@ -43,6 +43,8 @@ jobs: autoconf \ automake \ cargo \ + cbindgen \ + clang-format-14 \ git \ libtool \ libpcap-dev \ @@ -58,21 +60,14 @@ jobs: libnfnetlink0 \ libhiredis-dev \ libjansson-dev \ - libpython2.7 \ make \ - python \ rustc \ + python-is-python3 \ + python3 \ software-properties-common \ wget \ zlib1g \ zlib1g-dev - - name: Install packages for clang-format 9 - run: | - # no need to install full clang - apt-get install -y clang-format-9 - - name: Install cbindgen - run: cargo install --force --debug --version 0.24.3 cbindgen - - run: echo "$HOME/.cargo/bin" >> $GITHUB_PATH # Checking out the branch is not as simple as "checking out". 
# # In case master has any new commits since we branched off, github will diff --git a/.github/workflows/scan-build.yml b/.github/workflows/scan-build.yml index ef9c10bf1df9..ef675297f5af 100644 --- a/.github/workflows/scan-build.yml +++ b/.github/workflows/scan-build.yml @@ -12,7 +12,7 @@ jobs: scan-build: name: Scan-build runs-on: ubuntu-latest - container: ubuntu:23.04 + container: ubuntu:24.04 steps: - name: Cache scan-build uses: actions/cache@v3.3.1 @@ -30,8 +30,8 @@ jobs: automake \ cargo \ cbindgen \ - clang-16 \ - clang-tools-16 \ + clang-18 \ + clang-tools-18 \ dpdk-dev \ git \ libtool \ @@ -56,7 +56,7 @@ jobs: libevent-pthreads-2.1-7 \ libjansson-dev \ liblz4-dev \ - llvm-16-dev \ + llvm-18-dev \ make \ python3-yaml \ rustc \ @@ -66,14 +66,14 @@ jobs: - uses: actions/checkout@v3.5.3 - run: ./scripts/bundle.sh - run: ./autogen.sh - - run: scan-build-16 ./configure --enable-dpdk --enable-nfqueue --enable-nflog + - run: scan-build-18 ./configure --enable-dpdk --enable-nfqueue --enable-nflog env: - CC: clang-16 + CC: clang-18 # exclude libhtp from the analysis # disable security.insecureAPI.DeprecatedOrUnsafeBufferHandling explicitly as # this will require significant effort to address. 
- run: | - scan-build-16 --status-bugs --exclude libhtp/ \ + scan-build-18 --status-bugs --exclude libhtp/ \ -enable-checker valist.Uninitialized \ -enable-checker valist.CopyToSelf \ -enable-checker valist.Unterminated \ @@ -95,4 +95,4 @@ jobs: \ make env: - CC: clang-16 + CC: clang-18 diff --git a/ChangeLog b/ChangeLog index c5ffe05113c1..3a3642a10b34 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,172 @@ +7.0.6 -- 2024-06-26 + +Security #7105: http2: oom from duplicate headers (7.0.x backport)(CRITICAL - CVE 2024-38535) +Security #7042: defrag: id reuse can lead to invalid reassembly (7.0.x backport)(CRITICAL - CVE 2024-37151) +Security #7033: http/range: segv when http.memcap is reached (7.0.x backport)(HIGH - CVE 2024-38536) +Security #6988: modbus: txs without responses are never freed (7.0.x backport)(HIGH - CVE 2024-38534) +Bug #7107: packet: app-layer-events incorrectly used on recycled packets (7.0.x backport) +Bug #7064: util/radix-tree: Possible dereference of nullptr in case of unsuccess allocation of memory for node (7.0.x backport) +Bug #7063: smtp/mime: data command rejected by pipelining server does not reset data mode (7.0.x backport) +Bug #7060: smtp: split name logged as 2 names (7.0.x backport) +Bug #7050: af-packet: failure to start up on many threads plus high load (7.0.x backport) +Bug #7043: streaming/buffer: crash in HTTP body handling (7.0.x backport) +Bug #7038: pcap/log: MacOS rotates file well before limit is reached (7.0.x backport) +Bug #7035: time: in offline mode, time can stay behind at pcap start (7.0.x backport) +Bug #7023: unix-socket: iface-bypassed-stat crash (7.0.x backport) +Bug #7021: unix-socket: hostbit commands don't properly release host (7.0.x backport) +Bug #7015: rust: build with rust 1.78 with slice::from_raw_parts now requiring the pointer to be non-null (7.0.x backport) +Bug #6990: tls.random buffers don't work as expected (7.0.x backport) +Bug #6986: iprep: rule with '=,0' can't match (7.0.x backport) +Bug 
#6975: detect: log relevant frames app-layer metdata (7.0.x backport) +Bug #6950: decode/ppp: decoder.event.ppp.wrong_type on valid packet (7.0.x backport) +Bug #6897: detect/port: upper boundary ports are not correctly handled (7.0.x backport) +Bug #6895: detect/port: port grouping does not happen correctly if gap between a single and range port (7.0.x backport) +Bug #6862: profiling/rules: crash when profiling ends (7.0.x backport) +Bug #6848: alerts: wrongly using tx id 0 when there is no tx (7.0.x backport) +Bug #6845: coverity: warning in port grouping code (7.0.x backport) +Bug #6844: detect/port: port ranges are incorrect when a port is single as well as a part of range (7.0.x backport) +Bug #6690: eve: ethernet src_mac should match src_ip (7.0.x backport) +Bug #6520: detect-engine/port: recursive DetectPortInsert calls are expensive (7.0.x backport) +Optimization #6830: detect/port: port grouping is quite slow in worst cases (7.0.x backport) +Optimization #6829: detect/port: PortGroupWhitelist fn takes a lot of processing time (7.0.x backport) +Feature #7010: JA4 support for TLS and QUIC (7.0.x backport) +Feature #6557: profiling/rules: allow enabling profiling for pcap file runs (7.0.x backport) +Documentation #6910: userguide: document how to verify tar.gz signature (7.0.x backport) +Documentation #6687: docs: port userguide build instruction changes from master-6.0.x (7.0.x backport) +Documentation #6601: docs: update eBPF installation instructions (7.0.x backport) + +7.0.5 -- 2024-04-23 + +Security #6905: base64: off-by-three overflow in DecodeBase64() (7.0.x backport)(CVE 2024-32664) +Security #6901: http2: timeout logging headers (7.0.x backport)(CVE 2024-32663) +Security #6893: http2: oom on copying compressed headers (7.0.x backport)(CVE 2024-32663) +Security #6677: ip-defrag: packet can be considered complete even with holes (7.0.x backport)(CVE 2024-32867) +Security #6673: ip defrag: final overlapping packet can lead to "hole" in re-assembled data 
(7.0.x backport)(CVE 2024-32867) +Security #6672: ip defrag: re-assembly error in bsd policy (7.0.x backport)(CVE 2024-32867) +Bug #6970: streaming buffer: heap overflows in StreamingBufferAppend()/StreamingBufferAppendNoTrack() (7.0.x backport) +Bug #6966: improve handling of content encoding: gzip but request_body not actually compressed (7.0.x backport) +Bug #6958: Assert: BUG_ON(id <= 0 || id > (int)thread_store.threads_size); (7.0.x backport) +Bug #6949: detect/http.response_body: false positive because not enforcing direction to_client (7.0.x backport) +Bug #6945: defrag: reassembled packet can have wrong datatype (7.0.x backport) +Bug #6923: jsonbuilder: serializes Rust f64 NaNs to an invalid literal (7.0.x backport) +Bug #6919: pcre2 compile warning (7.0.x backport) +Bug #6907: Fix stats key (7.0.x backport) +Bug #6890: detect: slowdown in rule parsing (7.0.x backport) +Bug #6884: rust: clippy 1.77 warning (7.0.x backport) +Bug #6882: Detect: ipopts keyword misfires (7.0.x backport) +Bug #6872: dpdk: fix compatibility issues for ice cards (7.0.x backport) +Bug #6863: BUG_ON triggered from TmThreadsInjectFlowById (7.0.x backport) +Bug #6859: fast_pattern specification in base64_data shouldn't be allowed (7.0.x backport) +Bug #6727: stream: stream.drop-invalid drops valid traffic (7.0.x backport) +Bug #6679: datasets: discard datasets that hit the memcap while loading correctly (7.0.x backport) +Bug #6313: napatech: display HBA deprecation notice only once +Optimization #6880: conf: quadratic complexity in yaml loader (7.0.x backport) +Feature #6947: pcap: datalink type 229 not (yet) supported in module PcapFile (7.0.x backport) +Feature #6696: dpdk: power saving mode (7.0.x backport) +Documentation #6912: manpages: use consistant date based on release and/or git commits (7.0.x backport) + +7.0.4 -- 2024-03-19 + +Security #6868: eve: excessive ssh long banner logging (7.0.x backport)(CVE 2024-28870) +Security #6801: ssh: quadratic complexity in overlong 
banner (7.0.x backport)(CVE 2024-28870) +Security #6759: libhtp: quadratic complexity checking after request line mission protocol (7.0.x backport)(CVE 2024-28871) +Security #6798: output/filestore: timeout because of running OutputTxLog on useless packets (7.0.x backport) +Bug #6842: Error message from netmap when using Netmap pipes (with lb) (7.0.x backport) +Bug #6828: dpdk: NUMA warning on non-NUMA system (7.0.x backport) +Bug #6816: capture plugins: capture plugins unusable due to initialization order (7.0.x backport) +Bug #6812: pfring: memory leak (7.0.x backport) +Bug #6810: decode/pppoe: Suspicious pointer scaling (7.0.x backport) +Bug #6791: cppcheck 2.11 errors (7.0.x backport) +Bug #6785: detect/tls.certs: direction flag checked against wrong field (7.0.x backport) +Bug #6784: util/mime: Memory leak at util-decode-mime.c:MimeDecInitParser (7.0.x backport) +Bug #6768: multi-tenancy: dead lock during tenant loading (7.0.x backport) +Bug #6765: Hugepages Error for FreeBSD when kernel NUMA build option is not enabled (7.0.x backport) +Bug #6764: Huge increase on Suricata load time with a lot of ip-only rules and bigger HOME_NET (7.0.x backport) +Bug #6761: Hugepages Error for ARM64 and af-packet IPS mode (7.0.x backport) +Bug #6756: Netmap: deadlock if netmap_open fails (7.0.x backport) +Bug #6746: Suricata 7.0.2 parent interface object in stats contains VLAN-ID as keys (7.0.x backport) +Bug #6742: dpdk: automatic cache calculation is broken (7.0.x backport) +Bug #6738: dpdk: property configuration can lead to integer overflow (7.0.x backport) +Bug #6734: tcp: tcp flow flags changing incorrectly when ruleset contains content matching (7.0.x backport) +Bug #6622: detect/filestore: flow, to_server was broken by moving files into transactions (7.0.x backport) +Bug #6593: mqtt: frames on TCP are not set properly when parsing multiple PDUs in one go (7.0.x backport) +Bug #6580: ssh: no alert on packet with Message Code: New Keys (21) (7.0.x backport) +Bug #6538: 
drop: assertion failed !(PKT_IS_PSEUDOPKT(p)) && !PacketCheckAction(p, ACTION_DROP) (7.0.x backport) +Bug #6537: detect/filestore: be more explicit about the U16_MAX limit per signature group head (7.0.x backport) +Optimization #6774: app-layer/template: no limit on txs number (7.0.x backport) +Feature #6740: dpdk: warn the user if user-settings are adjusted to the device capabilities (7.0.x backport) +Task #6870: libhtp 0.5.47 (7.0.x backport) +Task #6749: doc: mention X710 RX descriptor limitation (7.0.x backport) +Documentation #6709: userguide/payload: fix explanation about bsize ranges (7.0.x backport) + +7.0.3 -- 2024-02-08 + +Security #6717: http2: evasion by splitting header fields over frames (7.0.x backport) +Security #6657: detect: heap use after free with http.request_header keyword (7.0.x backport) +Security #6540: http1: configurable limit for maximum number of live transactions per flow (7.0.x backport) +Security #6539: mqtt pcap with anomalies takes too long to process (7.0.x backport) +Security #6536: pgsql: quadratic complexity leads to over consumption of memory (7.0.x backport) +Security #6533: http1: quadratic complexity from infinite folded headers (7.0.x backport) +Security #6532: SMTP: quadratic complexity from unbounded number of transaction per flow (7.0.x backport) +Security #6531: http2: quadratic complexity in find_or_create_tx not bounded by max-tx (7.0.x backport) +Bug #6711: rules: failed rules after a skipped rule are recorded as skipped, not failed (7.0.x backport) +Bug #6700: detect/requires: assertion failed !(ret == -4) (7.0.x backport) +Bug #6697: dpdk: Analyze hugepage allocation on startup more thoroughly (7.0.x backport) +Bug #6688: log-pcap: crash with suricata.yaml setting max-file to 1 (7.0.x backport) +Bug #6665: eve/smtp: attachment filenames not logged (7.0.x backport) +Bug #6662: content-inspect: FN on negative distance (7.0.x backport) +Bug #6636: stats: flows with a detection-only alproto not accounted in this 
protocol (7.0.x backport) +Bug #6635: Profiling takes much longer to run than it used to (7.0.x backport) +Bug #6620: Endace: timestamp fixes (7.0.x backport) +Bug #6616: detect/analyzer: misrepresenting negative distance value (7.0.x backport) +Bug #6596: SCTIME_ADD_SECS() macro zeros out ts.usec part (7.0.x backport) +Bug #6595: SCTIME_FROM_TIMESPEC() creates incorrect timestamps (7.0.x backport) +Bug #6558: HTTP/2 - http.response_line has leading space (7.0.x backport) +Bug #6556: Invalid registration of prefiltering in stream size (7.0.x backport) +Bug #6535: http.header, http.header.raw and http.request_header buffers not populated when malformed header value exists (7.0.x backport) +Bug #6521: pgsql: u16 overflow found by oss-fuzz w/ quadfuzz (7.0.x backport) +Bug #6508: pgsql/probe: TCP on 5432 traffic incorrectly tagged as PGSQL (7.0.x backport) +Bug #6479: HTTP/2 - when userinfo is in the :authority pseudo header it breaks http.host +Bug #6448: detect: flow:established,not_established considered as valid even if it can never match +Bug #6438: eve filetype plugins: file type plugins do not de-initialize properly +Bug #6436: host: ip rep prevents tag/threshold/hostbits cleanup +Bug #6435: packetpool: fix single packet return logic +Bug #6423: detect-filesize no longer supports units in value +Bug #6420: dns/eve: an empty format section results in no response details being logged +Bug #6294: http2/brotli: subtract with overflow found by sydr-Fuzz +Bug #6292: Flow manager stuck forever on race condition for return stack +Bug #6278: add a hint if user/group name is not set +Bug #6272: dpdk: big mempool leads to an error with suricatasc unix socket +Bug #4623: byte_jump with negative post_offset value fails at the end of the buffer +Feature #6614: transformation - strip_pseudo_headers (7.0.x backport) +Feature #6613: support case insensitive testing of HTTP header name existence (7.0.x backport) +Feature #6612: New Transformation: to_lowercase (7.0.x backport) 
+Feature #6524: rules: "requires" keyword representing the minimum version of suricata to support the rule (7.0.x backport) +Feature #6507: HTTP/2 - app-layer-event and normalization when userinfo is in the :authority pseudo header for the http.host header (7.0.x backport) +Feature #6425: HTTP/2 - new app-layer-event when `:authority` and `host` headers do not match +Task #6606: flash decompression: update/remove deprecation warnings (7.0.x backport) +Task #6604: pgsql: don't log password msg if password disabled (7.0.x backport) +Task #6581: pgsql: add cancel request message (7.0.x backport) +Task #6564: doc: document file.data (7.0.x backport) +Task #6534: runmodes: remove reference to auto modes (7.0.x backport) +Task #6523: libhtp 0.5.46 (7.0.x backport) +Task #6345: Convert unittests to new FAIL/PASS API - util-misc.c +Task #6339: Convert unittests to new FAIL/PASS API - detect-tcp-window.c +Task #6332: Convert unittests to new FAIL/PASS API - detect-bytetest.c +Task #6329: Convert unittests to new FAIL/PASS API - flow-bit.c +Task #6328: Convert unittests to new FAIL/PASS API - detect-bytejump.c +Documentation #6699: remove references in docs mentioning prehistoric Suricata versions (7.0.x backport) +Documentation #6631: Fix byte_test examples (7.0.x backport) +Documentation #6594: docs: fix broken bulleted list style on rtd (7.0.x backport) +Documentation #6513: userguide: update tls eve-log fields 'not_before' and 'not_after' (7.0.x backport) +Documentation #6511: userguide: document "tag" keyword (7.0.x backport) +Documentation #6504: userguide: explain what flow_id is (7.0.x backport) +Documentation #6383: misc: improve code documentation +Documentation #6371: spelling error in the docs +Documentation #5720: Install: Be consistent with use of the "sudo" +Documentation #5473: doc: upgrade guide for upgrading from 6 to 7 +Documentation #4584: Rust doc: add docstring to rust module files + 7.0.2 -- 2023-10-18 Security #6306: mime: quadratic complexity in 
MimeDecAddEntity diff --git a/Makefile.am b/Makefile.am index 67963ed32fcf..d0d3d0981272 100644 --- a/Makefile.am +++ b/Makefile.am @@ -7,7 +7,9 @@ EXTRA_DIST = ChangeLog COPYING LICENSE suricata.yaml.in \ $(SURICATA_UPDATE_DIR) \ lua \ acsite.m4 \ - scripts/generate-images.sh + scripts/generate-images.sh \ + scripts/docs-almalinux9-minimal-build.sh \ + scripts/docs-ubuntu-debian-minimal-build.sh SUBDIRS = $(HTP_DIR) rust src qa rules doc contrib etc python ebpf \ $(SURICATA_UPDATE_DIR) diff --git a/configure.ac b/configure.ac index b377a1da9f2d..1eaf7c11a3fb 100644 --- a/configure.ac +++ b/configure.ac @@ -1,4 +1,4 @@ - AC_INIT([suricata],[7.0.3-dev]) + AC_INIT([suricata],[7.0.7-dev]) m4_ifndef([AM_SILENT_RULES], [m4_define([AM_SILENT_RULES],[])])AM_SILENT_RULES([yes]) AC_CONFIG_HEADERS([src/autoconf.h]) AC_CONFIG_SRCDIR([src/suricata.c]) @@ -219,6 +219,11 @@ [], [ #include ]) + AC_CHECK_DECL([_popcnt64], + AC_DEFINE([HAVE_POPCNT64], [1], [Use _popcnt64]), + [], [ + #include + ]) OCFLAGS=$CFLAGS CFLAGS="" @@ -1575,12 +1580,12 @@ echo exit 1 fi - PKG_CHECK_MODULES(LIBHTPMINVERSION, [htp >= 0.5.45],[libhtp_minver_found="yes"],[libhtp_minver_found="no"]) + PKG_CHECK_MODULES(LIBHTPMINVERSION, [htp >= 0.5.48],[libhtp_minver_found="yes"],[libhtp_minver_found="no"]) if test "$libhtp_minver_found" = "no"; then PKG_CHECK_MODULES(LIBHTPDEVVERSION, [htp = 0.5.X],[libhtp_devver_found="yes"],[libhtp_devver_found="no"]) if test "$libhtp_devver_found" = "no"; then echo - echo " ERROR! libhtp was found but it is neither >= 0.5.45, nor the dev 0.5.X" + echo " ERROR! 
libhtp was found but it is neither >= 0.5.48, nor the dev 0.5.X" echo exit 1 fi @@ -1596,6 +1601,7 @@ AC_CHECK_LIB([htp], [htp_config_set_lzma_layers],AC_DEFINE_UNQUOTED([HAVE_HTP_CONFIG_SET_LZMA_LAYERS],[1],[Found htp_config_set_lzma_layers function in libhtp]) ,,[-lhtp]) AC_CHECK_LIB([htp], [htp_config_set_compression_bomb_limit],AC_DEFINE_UNQUOTED([HAVE_HTP_CONFIG_SET_COMPRESSION_BOMB_LIMIT],[1],[Found htp_config_set_compression_bomb_limit function in libhtp]) ,,[-lhtp]) AC_CHECK_LIB([htp], [htp_config_set_compression_time_limit],AC_DEFINE_UNQUOTED([HAVE_HTP_CONFIG_SET_COMPRESSION_TIME_LIMIT],[1],[Found htp_config_set_compression_time_limit function in libhtp]) ,,[-lhtp]) + AC_CHECK_LIB([htp], [htp_config_set_max_tx],AC_DEFINE_UNQUOTED([HAVE_HTP_CONFIG_SET_MAX_TX],[1],[Found htp_config_set_max_tx function in libhtp]) ,,[-lhtp]) ]) if test "x$enable_non_bundled_htp" = "xno"; then @@ -1620,6 +1626,7 @@ AC_DEFINE_UNQUOTED([HAVE_HTP_CONFIG_SET_LZMA_LAYERS],[1],[Assuming htp_config_set_lzma_layers function in bundled libhtp]) AC_DEFINE_UNQUOTED([HAVE_HTP_CONFIG_SET_COMPRESSION_BOMB_LIMIT],[1],[Assuming htp_config_set_compression_bomb_limit function in bundled libhtp]) AC_DEFINE_UNQUOTED([HAVE_HTP_CONFIG_SET_COMPRESSION_TIME_LIMIT],[1],[Assuming htp_config_set_compression_time_limit function in bundled libhtp]) + AC_DEFINE_UNQUOTED([HAVE_HTP_CONFIG_SET_MAX_TX],[1],[Assuming htp_config_set_max_tx function in bundled libhtp]) else echo echo " ERROR: Libhtp is not bundled. 
Get libhtp by doing:" @@ -2201,6 +2208,27 @@ fi fi + AC_ARG_ENABLE(ja3, + AS_HELP_STRING([--disable-ja3], [Disable JA3 support]), + [enable_ja3="$enableval"], + [enable_ja3=yes]) + if test "$enable_ja3" = "yes"; then + AC_DEFINE([HAVE_JA3],[1],[JA3 enabled]) + enable_ja3="yes" + fi + AM_CONDITIONAL([HAVE_JA3], [test "x$enable_ja3" != "xno"]) + + AC_ARG_ENABLE(ja4, + AS_HELP_STRING([--disable-ja4], [Disable JA4 support]), + [enable_ja4="$enableval"], + [enable_ja4=yes]) + if test "$enable_ja4" = "yes"; then + AC_DEFINE([HAVE_JA4],[1],[JA4 enabled]) + enable_ja4="yes" + fi + AM_CONDITIONAL([HAVE_JA4], [test "x$enable_ja4" != "xno"]) + + # Check for lz4 enable_liblz4="yes" AC_CHECK_LIB(lz4, LZ4F_createCompressionContext, , enable_liblz4="no") @@ -2394,7 +2422,7 @@ fi AC_PATH_PROG(CBINDGEN, cbindgen, "no") if test "x$CBINDGEN" != "xno"; then cbindgen_version=$(cbindgen --version 2>&1 | cut -d' ' -f2-) - min_cbindgen_version="0.10.0" + min_cbindgen_version="0.16.0" AS_VERSION_COMPARE([$cbindgen_version], [$min_cbindgen_version], [cbindgen_ok="no"], [cbindgen_ok="yes"], @@ -2478,21 +2506,30 @@ return 0; AM_CONDITIONAL([HAS_FUZZLDFLAGS], [test "x$has_sanitizefuzzer" = "xyes"]) -# get revision - if test -f ./revision; then - REVISION=`cat ./revision` - AC_DEFINE_UNQUOTED([REVISION],[${REVISION}],[Git revision]) +# get git revision and last commit date + AC_PATH_PROG(HAVE_GIT_CMD, git, "no") + if test "$HAVE_GIT_CMD" != "no"; then + if [ test -e .git ]; then + REVISION=`git rev-parse --short HEAD` + LAST_COMMIT_DATE=`git log -1 --date=short --pretty=format:%cd` + REVISION="$REVISION $LAST_COMMIT_DATE" + AC_DEFINE_UNQUOTED([REVISION],[${REVISION}],[Git revision]) + fi + fi + +# Get the release date. If LAST_COMMIT_DATE was set in the previous +# step, use it, otherwise parse it from the ChangeLog. 
+ AC_MSG_CHECKING([for release date]) + if test "x$LAST_COMMIT_DATE" != "x"; then + RELEASE_DATE=$LAST_COMMIT_DATE else - AC_PATH_PROG(HAVE_GIT_CMD, git, "no") - if test "$HAVE_GIT_CMD" != "no"; then - if [ test -d .git ]; then - REVISION=`git rev-parse --short HEAD` - DATE=`git log -1 --date=short --pretty=format:%cd` - REVISION="$REVISION $DATE" - AC_DEFINE_UNQUOTED([REVISION],[${REVISION}],[Git revision]) - fi + RELEASE_DATE=`awk '/^[[0-9\.]]+ -- [[0-9]][[0-9]][[0-9]][[0-9]]-[[0-9]][[0-9]]-[[0-9]][[0-9]]/ { print $3; exit }' $srcdir/ChangeLog` + if test "x$RELEASE_DATE" = "x"; then + AC_MSG_ERROR([Failed to determine release date]) fi fi + AC_MSG_RESULT([${RELEASE_DATE}]) + AC_SUBST(RELEASE_DATE) # get MAJOR_MINOR version for embedding in configuration file. MAJOR_MINOR=`expr "${PACKAGE_VERSION}" : "\([[0-9]]\+\.[[0-9]]\+\).*"` @@ -2605,7 +2642,7 @@ AC_SUBST(enable_non_bundled_htp) AM_CONDITIONAL([BUILD_SHARED_LIBRARY], [test "x$enable_shared" = "xyes"] && [test "x$can_build_shared_library" = "xyes"]) -AC_CONFIG_FILES(Makefile src/Makefile rust/Makefile rust/Cargo.lock rust/Cargo.toml rust/derive/Cargo.toml rust/.cargo/config) +AC_CONFIG_FILES(Makefile src/Makefile rust/Makefile rust/Cargo.lock rust/Cargo.toml rust/derive/Cargo.toml rust/.cargo/config.toml) AC_CONFIG_FILES(qa/Makefile qa/coccinelle/Makefile) AC_CONFIG_FILES(rules/Makefile doc/Makefile doc/userguide/Makefile) AC_CONFIG_FILES(contrib/Makefile contrib/file_processor/Makefile contrib/file_processor/Action/Makefile contrib/file_processor/Processor/Makefile) @@ -2641,6 +2678,8 @@ SURICATA_BUILD_CONF="Suricata Configuration: LUA support: ${enable_lua} libluajit: ${enable_luajit} GeoIP2 support: ${enable_geoip} + JA3 support: ${enable_ja3} + JA4 support: ${enable_ja4} Non-bundled htp: ${enable_non_bundled_htp} Hyperscan support: ${enable_hyperscan} Libnet support: ${enable_libnet} diff --git a/doc/userguide/Makefile.am b/doc/userguide/Makefile.am index bd157920cfac..53f4907eb743 100644 --- 
a/doc/userguide/Makefile.am +++ b/doc/userguide/Makefile.am @@ -31,7 +31,8 @@ EXTRA_DIST = \ setting-up-ipsinline-for-windows.rst \ support-status.rst \ unix-socket.rst \ - what-is-suricata.rst + what-is-suricata.rst \ + verifying-source-files.rst if HAVE_SURICATA_MAN dist_man1_MANS = suricata.1 suricatasc.1 suricatactl.1 suricatactl-filestore.1 @@ -74,6 +75,7 @@ userguide.pdf: _build/latex/Suricata.pdf pdf: userguide.pdf _build/man: manpages/suricata.rst manpages/suricatasc.rst manpages/suricatactl.rst manpages/suricatactl-filestore.rst + RELEASE_DATE=$(RELEASE_DATE) \ sysconfdir=$(sysconfdir) \ localstatedir=$(localstatedir) \ version=$(PACKAGE_VERSION) \ diff --git a/doc/userguide/capture-hardware/dpdk.rst b/doc/userguide/capture-hardware/dpdk.rst index 91ae1c876ca9..6be7278b8cbb 100644 --- a/doc/userguide/capture-hardware/dpdk.rst +++ b/doc/userguide/capture-hardware/dpdk.rst @@ -15,6 +15,57 @@ learn more about the basic setup for DPDK. The following sections contain examples of how to set up DPDK and Suricata for more obscure use-cases. +Hugepage analysis +----------------- + +Suricata can analyse utilized hugepages on the system. This can be particularly +beneficial when there's a potential overallocation of hugepages. +The hugepage analysis is designed to examine the hugepages in use and +provide recommendations on an adequate number of hugepages. This then ensures +Suricata operates optimally while leaving sufficient memory for other +applications on the system. The analysis works by comparing snapshots of the +hugepages before and after Suricata is initialized. After the initialization, +no more hugepages are allocated by Suricata. +The hugepage analysis can be seen in the Perf log level and is printed out +during the Suricata start. It is only printed when Suricata detects some +discrepancies in the system related to hugepage allocation. + +It's recommended to perform this analysis from a "clean" state - +that is a state when all your hugepages are free.
It is especially recommended +when no other hugepage-dependent applications are running on your system. +This can be checked in one of two ways: + +.. code-block:: + + # global check + cat /proc/meminfo + + HugePages_Total: 1024 + HugePages_Free: 1024 + + # per-numa check depends on NUMA node ID, hugepage size, + # and nr_hugepages/free_hugepages - e.g.: + cat /sys/devices/system/node/node0/hugepages/hugepages-2048kB/free_hugepages + +After the termination of Suricata and other hugepage-related applications, +if the count of free hugepages is not equal with the total number of hugepages, +it indicates some hugepages were not freed completely. +This can be fixed by removing DPDK-related files from the hugepage-mounted +directory (filesystem). +It's important to exercise caution while removing hugepages, especially when +other hugepage-dependent applications are in operation, as this action will +disrupt their memory functionality. +Removing the DPDK files from the hugepage directory can often be done as: + +.. code-block:: bash + + sudo rm -rf /dev/hugepages/rtemap_* + + # To check where hugepages are mounted: + dpdk-hugepages.py -s + # or + mount | grep huge + Bond interface -------------- @@ -95,3 +146,41 @@ management and worker CPU set. - worker-cpu-set: cpu: [ 2,4,6,8 ] ... + +Interrupt (power-saving) mode +----------------------------- + +The DPDK is traditionally recognized for its polling mode operation. +In this mode, CPU cores are continuously querying for packets from +the Network Interface Card (NIC). While this approach offers benefits like +reduced latency and improved performance, it might not be the most efficient +in scenarios with sporadic or low traffic. +The constant polling can lead to unnecessary CPU consumption. +To address this, DPDK offers an `interrupt` mode. + +The obvious advantage that interrupt mode brings is power efficiency. +So far in our tests, we haven't observed a decrease in performance. 
Suricata's +performance has actually seen a slight improvement. +The (IPS runmode) users should be aware that interrupts can +introduce non-deterministic latency. However, the latency should never be +higher than in other (e.g. AF_PACKET/AF_XDP/...) capture methods. + +Interrupt mode in DPDK can be configured on a per-interface basis. +This allows for a hybrid setup where some workers operate in polling mode, +while others utilize the interrupt mode. +The configuration for the interrupt mode can be found and modified in the +DPDK section of the suricata.yaml file. + +Below is a sample configuration that demonstrates how to enable the interrupt mode for a specific interface: + +:: + + ... + dpdk: + eal-params: + proc-type: primary + + interfaces: + - interface: 0000:3b:00.0 + interrupt-mode: true + threads: 4 diff --git a/doc/userguide/capture-hardware/ebpf-xdp.rst b/doc/userguide/capture-hardware/ebpf-xdp.rst index 116038716eff..d9e3b2eeb04d 100644 --- a/doc/userguide/capture-hardware/ebpf-xdp.rst +++ b/doc/userguide/capture-hardware/ebpf-xdp.rst @@ -80,16 +80,15 @@ Make sure you have ``clang`` (>=3.9) installed on the system :: sudo apt install clang -Some i386 headers will also be needed as eBPF is not x86_64 and some included headers -are architecture specific :: - - sudo apt install libc6-dev-i386 --no-install-recommends - libbpf ~~~~~~ Suricata uses libbpf to interact with eBPF and XDP :: + sudo apt install libbpf-dev + +If the libbpf package is unavailable, it can be cloned from the repository :: + git clone https://github.com/libbpf/libbpf.git Now, you can build and install the library :: @@ -109,7 +108,7 @@ Compile and install Suricata To get Suricata source, you can use the usual :: git clone https://github.com/OISF/suricata.git - cd suricata && git clone https://github.com/OISF/libhtp.git -b 0.5.x + cd suricata && ./scripts/bundle.sh ./autogen.sh diff --git a/doc/userguide/conf.py b/doc/userguide/conf.py index cf87f19c311e..959744e88b7b 100644 --- 
a/doc/userguide/conf.py +++ b/doc/userguide/conf.py @@ -19,6 +19,10 @@ import subprocess import datetime +# Set 'today'. This will be used as the man page date. If an empty +# string todays date will be used. +today = os.environ.get('RELEASE_DATE', '') + on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # If extensions (or modules to document with autodoc) are in another directory, @@ -67,7 +71,7 @@ version = os.environ.get('version', None) if not version: version = re.search( - "AC_INIT\(\[suricata\],\s*\[(.*)?\]\)", + r"AC_INIT\(\[suricata\],\s*\[(.*)?\]\)", open("../../configure.ac").read()).groups()[0] if not version: version = "unknown" @@ -137,20 +141,15 @@ html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] except: html_theme = 'default' - def setup(app): - if hasattr(app, 'add_css_file'): - app.add_css_file('css/suricata.css') - else: - app.add_stylesheet('css/suricata.css') else: html_theme = 'sphinx_rtd_theme' - html_context = { - 'css_files': [ - 'https://media.readthedocs.org/css/sphinx_rtd_theme.css', - 'https://media.readthedocs.org/css/readthedocs-doc-embed.css', - '_static/css/suricata.css', - ], - } + +# Add in our own stylesheet. +def setup(app): + if hasattr(app, 'add_css_file'): + app.add_css_file('css/suricata.css') + else: + app.add_stylesheet('css/suricata.css') # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the diff --git a/doc/userguide/configuration/suricata-yaml.rst b/doc/userguide/configuration/suricata-yaml.rst index c19ed48b3d0e..6eea5e879312 100644 --- a/doc/userguide/configuration/suricata-yaml.rst +++ b/doc/userguide/configuration/suricata-yaml.rst @@ -656,9 +656,9 @@ For setting the option sgh-mpm-context, you can choose from auto, full or single. The default setting is 'auto', meaning Suricata selects full or single based on the algorithm you use. 
'Full' means that every group has its own MPM-context, and 'single' that all groups share one -MPM-context. The two algorithms ac and ac-gfbs are new in 1.03. These -algorithms use a single MPM-context if the Sgh-MPM-context setting is -'auto'. The rest of the algorithms use full in that case. +MPM-context. The algorithm "ac" uses a single MPM-context if the +Sgh-MPM-context setting is 'auto'. The rest of the algorithms use full +in that case. The inspection-recursion-limit option has to mitigate that possible bugs in Suricata cause big problems. Often Suricata has to deal with @@ -1256,6 +1256,37 @@ network inspection. .. image:: suricata-yaml/IDS_chunk_size.png + +Host Tracking +------------- + +.. _suricata-yaml-host-settings: + + +The Host table is used for tracking per IP address. This is used for tracking +per IP thresholding, per IP tagging, storing `iprep` data and storing `hostbit`. + +Settings +~~~~~~~~ + +The configuration allows specifying the following settings: `hash-size`, `prealloc` and `memcap`. + +.. code-block:: yaml + + host: + hash-size: 4096 + prealloc: 1000 + memcap: 32mb + +* `hash-size`: size of the hash table in number of rows +* `prealloc`: number of `Host` objects preallocated for efficiency +* `memcap`: max memory use for hosts, including the hash table size + +Hosts are evicted from the hash table by the Flow Manager thread when all +data in the host is expired (tag, threshold, etc). Hosts with iprep will +not expire. + + Application Layer Parsers ------------------------- @@ -1269,7 +1300,7 @@ the default behavior). Each supported protocol has a dedicated subsection under ``protocols``. -Asn1_max_frames (new in 1.0.3 and 1.1) +Asn1_max_frames ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Asn1 (`Abstract Syntax One @@ -1730,7 +1761,7 @@ incompatible with ``decode-mime``. If both are enabled, Maximum transactions ~~~~~~~~~~~~~~~~~~~~ -MQTT, FTP, PostgreSQL, SMB, DCERPC and NFS have each a `max-tx` parameter that can be customized. 
+SMTP, MQTT, FTP, PostgreSQL, SMB, DCERPC, HTTP1 and NFS have each a `max-tx` parameter that can be customized. `max-tx` refers to the maximum number of live transactions for each flow. An app-layer event `protocol.too_many_transactions` is triggered when this value is reached. The point of this parameter is to find a balance between the completeness of analysis @@ -1842,14 +1873,15 @@ Default Log Format ~~~~~~~~~~~~~~~~~~ A logging line exists of two parts. First it displays meta information -(thread id, date etc.), and finally the actual log message. Example: +(Log-level, Suricata module), and finally the actual log message. Example: :: - [27708] 15/10/2010 -- 11:40:07 - (suricata.c:425) (main) – This is Suricata version 1.0.2 + i: suricata: This is Suricata version 7.0.2 RELEASE running in USER mode -(Here the part until the – is the meta info, "This is Suricata 1.0.2" -is the actual message.) +(Here the part until the second `:` is the meta info, +"This is Suricata version 7.0.2 RELEASE running in USER mode" is the actual +message.) It is possible to determine which information will be displayed in this line and (the manner how it will be displayed) in which format it @@ -2098,7 +2130,11 @@ size of the cache is covered in the YAML file. To be able to run DPDK on Intel cards, it is required to change the default Intel driver to either `vfio-pci` or `igb_uio` driver. The process is described in `DPDK manual page regarding Linux drivers -`_. +`_. +The Intel NICs have the amount of RX/TX descriptors capped at 4096. +This should be possible to change by manually compiling the DPDK while +changing the value of respective macros for the desired drivers +(e.g. IXGBE_MAX_RING_DESC/I40E_MAX_RING_DESC). DPDK is natively supported by Mellanox and thus their NICs should work "out of the box". 
diff --git a/doc/userguide/index.rst b/doc/userguide/index.rst index e30de788fe03..1440fa82801a 100644 --- a/doc/userguide/index.rst +++ b/doc/userguide/index.rst @@ -34,3 +34,4 @@ This is the documentation for Suricata |version|. acknowledgements licenses/index.rst devguide/index.rst + verifying-source-files \ No newline at end of file diff --git a/doc/userguide/install.rst b/doc/userguide/install.rst index b3d39d216a0b..1f4a3d695291 100644 --- a/doc/userguide/install.rst +++ b/doc/userguide/install.rst @@ -16,10 +16,13 @@ Source Installing from the source distribution files gives the most control over the Suricata installation. +The Suricata source distribution files should be verified before building +the source, see :doc:`verifying-source-files`. + Basic steps:: - tar xzvf suricata-6.0.0.tar.gz - cd suricata-6.0.0 + tar xzvf suricata-7.0.5.tar.gz + cd suricata-7.0.5 ./configure make make install @@ -61,62 +64,19 @@ Common configure options Enables `DPDK `_ packet capture method. -Dependencies -^^^^^^^^^^^^ - -For Suricata's compilation you'll need the following libraries and their development headers installed:: - - libjansson, libpcap, libpcre2, libyaml, zlib - -The following tools are required:: - - make gcc (or clang) pkg-config rustc cargo - -Rust support:: - - rustc, cargo - - Some distros don't provide or provide outdated Rust packages. - Rust can also be installed directly from the Rust project itself:: - - 1) Install Rust https://www.rust-lang.org/en-US/install.html - 2) Install cbindgen - if the cbindgen is not found in the repository - or the cbindgen version is lower than required, it can be - alternatively installed as: cargo install --force cbindgen - 3) Make sure the cargo path is within your PATH environment - e.g. echo 'export PATH=”${PATH}:~/.cargo/bin”' >> ~/.bashrc - e.g. export PATH="${PATH}:/root/.cargo/bin" +Dependencies and compilation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Ubuntu/Debian """"""""""""" .. 
note:: The following instructions require ``sudo`` to be installed. -Minimal:: - - # Installed Rust and cargo as indicated above - sudo apt-get install build-essential git libjansson-dev libpcap-dev \ - libpcre2-dev libtool libyaml-dev make pkg-config zlib1g-dev - # On most distros installing cbindgen with package manager should be enough - sudo apt-get install cbindgen # alternative: cargo install --force cbindgen - -Recommended:: - - # Installed Rust and cargo as indicated above - sudo apt-get install autoconf automake build-essential ccache clang curl git \ - gosu jq libbpf-dev libcap-ng0 libcap-ng-dev libelf-dev \ - libevent-dev libgeoip-dev libhiredis-dev libjansson-dev \ - liblua5.1-dev libmagic-dev libnet1-dev libpcap-dev \ - libpcre2-dev libtool libyaml-0-2 libyaml-dev m4 make \ - pkg-config python3 python3-dev python3-yaml sudo zlib1g \ - zlib1g-dev - cargo install --force cbindgen - -Extra for iptables/nftables IPS integration:: - - sudo apt-get install libnetfilter-queue-dev libnetfilter-queue1 \ - libnetfilter-log-dev libnetfilter-log1 \ - libnfnetlink-dev libnfnetlink0 +.. literalinclude:: ../../scripts/docs-ubuntu-debian-minimal-build.sh + :caption: Minimal dependencies for Ubuntu/Debian + :language: bash + :start-after: # install-guide-documentation tag start: Minimal dependencies + :end-before: # install-guide-documentation tag end: Minimal dependencies CentOS, AlmaLinux, RockyLinux, Fedora, etc """""""""""""""""""""""""""""""""""""""""" @@ -128,46 +88,50 @@ repository in most distros. 
You can enable it possibly by one of the following ways:: sudo dnf -y update - sudo dnf -y install dnf-plugins-core - # AlmaLinux 8 + sudo dnf -y install epel-release dnf-plugins-core + # AlmaLinux 8 / RockyLinux 8 sudo dnf config-manager --set-enabled powertools - # AlmaLinux 9 + # AlmaLinux 9 / RockyLinux 9 sudo dnf config-manager --set-enable crb # Oracle Linux 8 sudo dnf config-manager --set-enable ol8_codeready_builder # Oracle Linux 9 sudo dnf config-manager --set-enable ol9_codeready_builder -Minimal:: - - # Installed Rust and cargo as indicated above - sudo dnf install -y gcc gcc-c++ git jansson-devel libpcap-devel libtool \ - libyaml-devel make pcre2-devel which zlib-devel - cargo install --force cbindgen - -Recommended:: - - # Installed Rust and cargo as indicated above - sudo dnf install -y autoconf automake diffutils file-devel gcc gcc-c++ git \ - jansson-devel jq libcap-ng-devel libevent-devel \ - libmaxminddb-devel libnet-devel libnetfilter_queue-devel \ - libnfnetlink-devel libpcap-devel libtool libyaml-devel \ - lua-devel lz4-devel make nss-devel pcre2-devel pkgconfig \ - python3-devel python3-sphinx python3-yaml sudo which \ - zlib-devel - cargo install --force cbindgen +.. 
literalinclude:: ../../scripts/docs-almalinux9-minimal-build.sh + :caption: Minimal dependencies for RPM-based distributions + :language: bash + :start-after: # install-guide-documentation tag start: Minimal RPM-based dependencies + :end-before: # install-guide-documentation tag end: Minimal RPM-based dependencies Compilation -^^^^^^^^^^^ +""""""""""" Follow these steps from your Suricata directory:: - ./scripts/bundle.sh - ./autogen.sh ./configure # you may want to add additional parameters here # ./configure --help to get all available parameters - make -j8 # j is for paralleling, you may de/increase depending on your CPU + # j is for adding concurrency to make; the number indicates how much + # concurrency so choose a number that is suitable for your build system + make -j8 make install # to install your Suricata compiled binary + # make install-full - installs configuration and rulesets as well + +Rust support +"""""""""""" + + Rust packages can be found in package managers but some distributions + don't provide Rust or provide outdated Rust packages. + In case of insufficient version you can install Rust directly + from the Rust project itself:: + + 1) Install Rust https://www.rust-lang.org/en-US/install.html + 2) Install cbindgen - if the cbindgen is not found in the repository + or the cbindgen version is lower than required, it can be + alternatively installed as: cargo install --force cbindgen + 3) Make sure the cargo path is within your PATH environment + echo 'export PATH="~/.cargo/bin:${PATH}"' >> ~/.bashrc + export PATH="~/.cargo/bin:${PATH}" Auto-Setup ^^^^^^^^^^ diff --git a/doc/userguide/output/custom-http-logging.rst b/doc/userguide/output/custom-http-logging.rst index f7d21adac5d8..e4ab4076436d 100644 --- a/doc/userguide/output/custom-http-logging.rst +++ b/doc/userguide/output/custom-http-logging.rst @@ -1,8 +1,6 @@ Custom http logging =================== -As of Suricata 1.3.1 you can enable a custom http logging option. 
- In your Suricata.yaml, find the http-log section and edit as follows: diff --git a/doc/userguide/output/eve/eve-json-format.rst b/doc/userguide/output/eve/eve-json-format.rst index a253e046cf7f..a2140149eb04 100644 --- a/doc/userguide/output/eve/eve-json-format.rst +++ b/doc/userguide/output/eve/eve-json-format.rst @@ -43,7 +43,307 @@ All the JSON log types share a common structure: :: - {"timestamp":"2009-11-24T21:27:09.534255","event_type":"TYPE", ...tuple... ,"TYPE":{ ... type specific content ... }} + {"timestamp":"2009-11-24T21:27:09.534255","flow_id":ID_NUMBER, "event_type":"TYPE", ...tuple... ,"TYPE":{ ... type specific content ... }} + +Field: flow_id +~~~~~~~~~~~~~~ + +Correlates the network protocol, flow logs EVE data and any evidence that +Suricata has logged to an ``alert`` event and that alert's metadata, as well as +to ``fileinfo``/file transaction and anomaly logs, if available. The same correlation +and logs are produced regardless if there is an alert, for any session/flow. + +The ability to correlate EVE logs belonging to a specific session/flow was +introduced in 2014 (see `commit f1185d051c21 `_). 
+ +Further below, you can see several examples of events logged by Suricata: an +:ref:`alert` for an ``HTTP`` rule, ``fileinfo``, :ref:`http`, +:ref:`anomaly`, and :ref:`flow` events, all +easily correlated using the ``flow_id`` EVE field:: + + $ jq 'select(.flow_id==1676750115612680)' eve.json + +Event type: ``alert``:: + + { + "timestamp": "2023-09-18T06:13:41.532140+0000", + "flow_id": 1676750115612680, + "pcap_cnt": 130, + "event_type": "alert", + "src_ip": "142.11.240.191", + "src_port": 35361, + "dest_ip": "192.168.100.237", + "dest_port": 49175, + "proto": "TCP", + "pkt_src": "wire/pcap", + "ether": { + "src_mac": "52:54:00:36:3e:ff", + "dest_mac": "12:a9:86:6c:77:de" + }, + "tx_id": 1, + "alert": { + "action": "allowed", + "gid": 1, + "signature_id": 2045001, + "rev": 1, + "signature": "ET ATTACK_RESPONSE Win32/LeftHook Stealer Browser Extension Config Inbound", + "category": "A Network Trojan was detected", + "severity": 1, + "metadata": { + "affected_product": [ + "Windows_XP_Vista_7_8_10_Server_32_64_Bit" + ], + "attack_target": [ + "Client_Endpoint" + ], + "created_at": [ + "2023_04_17" + ], + "deployment": [ + "Perimeter" + ], + "former_category": [ + "ATTACK_RESPONSE" + ], + "signature_severity": [ + "Major" + ], + "updated_at": [ + "2023_04_18" + ] + } + }, + "http": { + "hostname": "142.11.240.191", + "http_port": 35361, + "url": "/", + "http_content_type": "text/xml", + "http_method": "POST", + "protocol": "HTTP/1.1", + "status": 200, + "length": 5362 + }, + "files": [ + { + "filename": "/", + "gaps": false, + "state": "CLOSED", + "stored": false, + "size": 5362, + "tx_id": 1 + } + ], + "app_proto": "http", + "direction": "to_client", + "flow": { + "pkts_toserver": 13, + "pkts_toclient": 12, + "bytes_toserver": 1616, + "bytes_toclient": 8044, + "start": "2023-09-18T06:13:33.324862+0000", + "src_ip": "192.168.100.237", + "dest_ip": "142.11.240.191", + "src_port": 49175, + "dest_port": 35361 + } + } + +Event type: ``fileinfo``:: + + { + "timestamp": 
"2023-09-18T06:13:33.903924+0000", + "flow_id": 1676750115612680, + "pcap_cnt": 70, + "event_type": "fileinfo", + "src_ip": "192.168.100.237", + "src_port": 49175, + "dest_ip": "142.11.240.191", + "dest_port": 35361, + "proto": "TCP", + "pkt_src": "wire/pcap", + "ether": { + "src_mac": "12:a9:86:6c:77:de", + "dest_mac": "52:54:00:36:3e:ff" + }, + "http": { + "hostname": "142.11.240.191", + "http_port": 35361, + "url": "/", + "http_content_type": "text/xml", + "http_method": "POST", + "protocol": "HTTP/1.1", + "status": 200, + "length": 212 + }, + "app_proto": "http", + "fileinfo": { + "filename": "/", + "gaps": false, + "state": "CLOSED", + "stored": false, + "size": 137, + "tx_id": 0 + } + } + +Event type: ``HTTP``:: + + { + "timestamp": "2023-09-18T06:13:33.903924+0000", + "flow_id": 1676750115612680, + "pcap_cnt": 70, + "event_type": "http", + "src_ip": "192.168.100.237", + "src_port": 49175, + "dest_ip": "142.11.240.191", + "dest_port": 35361, + "proto": "TCP", + "pkt_src": "wire/pcap", + "ether": { + "src_mac": "12:a9:86:6c:77:de", + "dest_mac": "52:54:00:36:3e:ff" + }, + "tx_id": 0, + "http": { + "hostname": "142.11.240.191", + "http_port": 35361, + "url": "/", + "http_content_type": "text/xml", + "http_method": "POST", + "protocol": "HTTP/1.1", + "status": 200, + "length": 212, + "request_headers": [ + { + "name": "Content-Type", + "value": "text/xml; charset=utf-8" + }, + { + "name": "SOAPAction", + "value": "\"http://tempuri.org/Endpoint/CheckConnect\"" + }, + { + "name": "Host", + "value": "142.11.240.191:35361" + }, + { + "name": "Content-Length", + "value": "137" + }, + { + "name": "Expect", + "value": "100-continue" + }, + { + "name": "Accept-Encoding", + "value": "gzip, deflate" + }, + { + "name": "Connection", + "value": "Keep-Alive" + } + ], + "response_headers": [ + { + "name": "Content-Length", + "value": "212" + }, + { + "name": "Content-Type", + "value": "text/xml; charset=utf-8" + }, + { + "name": "Server", + "value": "Microsoft-HTTPAPI/2.0" + 
}, + { + "name": "Date", + "value": "Mon, 18 Sep 2023 06:13:33 GMT" + } + ] + } + } + +Event type: ``anomaly``:: + + { + "timestamp": "2023-09-18T06:13:58.882971+0000", + "flow_id": 1676750115612680, + "pcap_cnt": 2878, + "event_type": "anomaly", + "src_ip": "192.168.100.237", + "src_port": 49175, + "dest_ip": "142.11.240.191", + "dest_port": 35361, + "proto": "TCP", + "pkt_src": "wire/pcap", + "ether": { + "src_mac": "12:a9:86:6c:77:de", + "dest_mac": "52:54:00:36:3e:ff" + }, + "tx_id": 3, + "anomaly": { + "app_proto": "http", + "type": "applayer", + "event": "UNABLE_TO_MATCH_RESPONSE_TO_REQUEST", + "layer": "proto_parser" + } + } + + +Event type: ``flow``:: + + { + "timestamp": "2023-09-18T06:13:21.216460+0000", + "flow_id": 1676750115612680, + "event_type": "flow", + "src_ip": "192.168.100.237", + "src_port": 49175, + "dest_ip": "142.11.240.191", + "dest_port": 35361, + "proto": "TCP", + "app_proto": "http", + "flow": { + "pkts_toserver": 3869, + "pkts_toclient": 1523, + "bytes_toserver": 3536402, + "bytes_toclient": 94102, + "start": "2023-09-18T06:13:33.324862+0000", + "end": "2023-09-18T06:14:13.752399+0000", + "age": 40, + "state": "closed", + "reason": "shutdown", + "alerted": true + }, + "ether": { + "dest_macs": [ + "52:54:00:36:3e:ff" + ], + "src_macs": [ + "12:a9:86:6c:77:de" + ] + }, + "tcp": { + "tcp_flags": "1e", + "tcp_flags_ts": "1e", + "tcp_flags_tc": "1a", + "syn": true, + "rst": true, + "psh": true, + "ack": true, + "state": "closed", + "ts_max_regions": 1, + "tc_max_regions": 1 + } + } + +.. note:: + It is possible to have even more detailed alert records, by enabling for + instance logging http-body, or alert metadata (:ref:`alert output`). + +Examples come from pcap found at https://app.any.run/tasks/ce7ca983-9e4b-4251-a7c3-fefa3da02ebe/. + Event types ~~~~~~~~~~~ @@ -86,6 +386,8 @@ generated the event. omitted from internal "pseudo" packets such as flow timeout packets. +.. 
_eve-format-alert: + Event type: Alert ----------------- @@ -191,6 +493,8 @@ If pcap log capture is active in `multi` mode, a `capture_file` key will be adde with value being the full path of the pcap file where the corresponding packets have been extracted. +.. _eve-format-anomaly: + Event type: Anomaly ------------------- @@ -304,6 +608,8 @@ Examples } } +.. _eve-format-http: + Event type: HTTP ---------------- @@ -735,12 +1041,13 @@ If extended logging is enabled the following fields are also included: * "fingerprint": The (SHA1) fingerprint of the TLS certificate * "sni": The Server Name Indication (SNI) extension sent by the client * "version": The SSL/TLS version used -* "not_before": The NotBefore field from the TLS certificate -* "not_after": The NotAfter field from the TLS certificate +* "notbefore": The NotBefore field from the TLS certificate +* "notafter": The NotAfter field from the TLS certificate * "ja3": The JA3 fingerprint consisting of both a JA3 hash and a JA3 string * "ja3s": The JA3S fingerprint consisting of both a JA3 hash and a JA3 string +* "ja4": The JA4 client fingerprint for TLS -JA3 must be enabled in the Suricata config file (set 'app-layer.protocols.tls.ja3-fingerprints' to 'yes'). +JA3 and JA4 must be enabled in the Suricata config file (set 'app-layer.protocols.tls.ja3-fingerprints'/'app-layer.protocols.tls.ja4-fingerprints' to 'yes'). In addition to this, custom logging also allows the following fields: @@ -1345,6 +1652,8 @@ Example of SSH logging: } } +.. _eve-format-flow: + Event type: Flow ---------------- @@ -2124,13 +2433,17 @@ Example of HTTP2 logging, of a request and response: Event type: PGSQL ----------------- -PGSQL eve-logs reflect the bidirectional nature of the protocol transactions. Each PGSQL event lists at most one -"Request" message field and one or more "Response" messages. - -The PGSQL parser merges individual messages into one EVE output item if they belong to the same transaction. 
In such cases, the source and destination information (IP/port) reflect the direction of the initial request, but contain messages from both sides. +PGSQL eve-logs reflect the bidirectional nature of the protocol transactions. +Each PGSQL event lists at most one "Request" message field and one or more +"Response" messages. +The PGSQL parser merges individual messages into one EVE output item if they +belong to the same transaction. In such cases, the source and destination +information (IP/port) reflect the direction of the initial request, but contain +messages from both sides. -Example of ``pgsql`` event for a SimpleQuery transaction complete with request with a ``SELECT`` statement and its response:: +Example of ``pgsql`` event for a SimpleQuery transaction complete with request +with a ``SELECT`` statement and its response:: { "timestamp": "2021-11-24T16:56:24.403417+0000", @@ -2156,51 +2469,80 @@ Example of ``pgsql`` event for a SimpleQuery transaction complete with request w } } -While on the wire PGSQL messages follow basically two types (startup messages and regular messages), those may have different subfields and/or meanings, based on the message type. Messages are logged based on their type and relevant fields. +While on the wire PGSQL messages follow basically two types (startup messages +and regular messages), those may have different subfields and/or meanings, based +on the message type. Messages are logged based on their type and relevant fields. -We list a few possible message types and what they mean in Suricata. For more details on message types and formats as well as what each message and field mean for PGSQL, check `PostgreSQL's official documentation `_. +We list a few possible message types and what they mean in Suricata. For more +details on message types and formats as well as what each message and field mean +for PGSQL, check `PostgreSQL's official documentation `_. Fields ~~~~~~ * "tx_id": internal transaction id. 
-* "request": each PGSQL transaction may have up to one request message. The possible messages will be described in another section. -* "response": even when there are several "Response" messages, there is one ``response`` field that summarizes all responses for that transaction. The possible messages will be described in another section. +* "request": each PGSQL transaction may have up to one request message. The + possible messages will be described in another section. +* "response": even when there are several "Response" messages, there is one + ``response`` field that summarizes all responses for that transaction. The + possible messages will be described in another section. Request Messages ~~~~~~~~~~~~~~~~ -Some of the possible request messages are: - -* "startup_message": message sent by a frontend/client process to start a new PostgreSQL connection -* "password_message": if password output for PGSQL is enabled in suricata.yaml, carries the password sent during Authentication phase -* "simple_query": issued SQL command during simple query subprotocol. PostgreSQL identifies specific sets of commands that change the set of expected messages to be exchanged as subprotocols. -* "message": frontend responses which do not have meaningful payloads are logged like this, where the field value is the message type - -There are several different authentication messages possible, based on selected authentication method. (e.g. the SASL authentication will have a set of authentication messages different from when ``md5`` authentication is chosen). +Requests are sent by the frontend (client), which would be the source of a pgsql +flow. Some of the possible request messages are: + +* "startup_message": message sent to start a new PostgreSQL connection +* "password_message": if password output for PGSQL is enabled in suricata.yaml, + carries the password sent during Authentication phase +* "simple_query": issued SQL command during simple query subprotocol. 
PostgreSQL + identifies specific sets of commands that change the set of expected messages + to be exchanged as subprotocols. +* ``"message": "cancel_request"``: sent after a query, when the frontend + attempts to cancel said query. This message is sent over a different port, + thus being shown as a different flow. It has no direct answer from the + backend, but if successful will lead to an ``ErrorResponse`` in the + transaction where the query was sent. +* "message": requests which do not have meaningful payloads are logged like this, + where the field value is the message type + +There are several different authentication messages possible, based on selected +authentication method. (e.g. the SASL authentication will have a set of +authentication messages different from when ``md5`` authentication is chosen). Response Messages ~~~~~~~~~~~~~~~~~ -Some of the possible request messages are: +Responses are sent by the backend (server), which would be the destination of a +pgsql flow. Some of the possible response messages are: -* "authentication_sasl_final": final SCRAM ``server-final-message``, as explained at https://www.postgresql.org/docs/14/sasl-authentication.html#SASL-SCRAM-SHA-256 -* "message": Backend responses which do not have meaningful payloads are logged like this, where the field value is the message type +* "authentication_sasl_final": final SCRAM ``server-final-message``, as explained + at https://www.postgresql.org/docs/14/sasl-authentication.html#SASL-SCRAM-SHA-256 +* "message": Backend responses which do not have meaningful payloads are logged + like this, where the field value is the message type * "error_response" * "notice_response" * "notification_response" * "authentication_md5_password": a string with the ``md5`` salt value * "parameter_status": logged as an array * "backend_key_data" -* "data_rows": integer. When one or many ``DataRow`` messages are parsed, the total returned rows -* "data_size": in bytes. 
When one or many ``DataRow`` messages are parsed, the total size in bytes of the data returned +* "data_rows": integer. When one or many ``DataRow`` messages are parsed, the + total returned rows +* "data_size": in bytes. When one or many ``DataRow`` messages are parsed, the + total size in bytes of the data returned * "command_completed": string. Informs the command just completed by the backend -* "ssl_accepted": bool. With this event, the initial PGSQL SSL Handshake negotiation is complete in terms of tracking and logging. The session will be upgraded to use TLS encryption +* "ssl_accepted": bool. With this event, the initial PGSQL SSL Handshake + negotiation is complete in terms of tracking and logging. The session will be + upgraded to use TLS encryption Examples ~~~~~~~~ -The two ``pgsql`` events in this example represent a rejected ``SSL handshake`` and a following connection request where the authentication method indicated by the backend was ``md5``:: +The two ``pgsql`` events in this example represent a rejected ``SSL handshake`` +and a following connection request where the authentication method indicated by +the backend was ``md5``:: { "timestamp": "2021-11-24T16:56:19.435242+0000", @@ -2255,6 +2597,97 @@ The two ``pgsql`` events in this example represent a rejected ``SSL handshake`` } } +``AuthenticationOk``: a response indicating that the connection was successfully +established.:: + + { + "pgsql": { + "tx_id": 3, + "response": { + "message": "authentication_ok", + "parameter_status": [ + { + "application_name": "psql" + }, + { + "client_encoding": "UTF8" + }, + { + "date_style": "ISO, MDY" + }, + { + "integer_datetimes": "on" + }, + { + "interval_style": "postgres" + }, + { + "is_superuser": "on" + }, + { + "server_encoding": "UTF8" + }, + { + "server_version": "13.6 (Debian 13.6-1.pgdg110+1)" + }, + { + "session_authorization": "rules" + }, + { + "standard_conforming_strings": "on" + }, + { + "time_zone": "Etc/UTC" + } + ], + "process_id": 28954, + 
"secret_key": 889887985 + } + } + } + +.. note:: + In Suricata, the ``AuthenticationOk`` message is also where the backend's + ``process_id`` and ``secret_key`` are logged. These must be sent by the + frontend when it issues a ``CancelRequest`` message (seen below). + +A ``CancelRequest`` message:: + + { + "timestamp": "2023-12-07T15:46:56.971150+0000", + "flow_id": 775771889500133, + "event_type": "pgsql", + "src_ip": "100.88.2.140", + "src_port": 39706, + "dest_ip": "100.96.199.113", + "dest_port": 5432, + "proto": "TCP", + "pkt_src": "stream (flow timeout)", + "pgsql": { + "tx_id": 1, + "request": { + "message": "cancel_request", + "process_id": 28954, + "secret_key": 889887985 + } + } + } + +.. note:: + As the ``CancelRequest`` message is sent over a new connection, the way to + correlate it with the proper frontend/flow from which it originates is by + querying on ``process_id`` and ``secret_key`` seen in the + ``AuthenticationOk`` event. + +References: + * `PostgreSQL protocol - Canceling Requests in Progress`_ + * `PostgreSQL message format - BackendKeyData`_ + +.. _PostgreSQL protocol - Canceling Requests in Progress: https://www.postgresql + .org/docs/current/protocol-flow.html#PROTOCOL-FLOW-CANCELING-REQUESTS +.. 
_PostgreSQL message format - BackendKeyData: https://www.postgresql.org/docs + /current/protocol-message-formats.html#PROTOCOL-MESSAGE-FORMATS-BACKENDKEYDATA + Event type: IKE --------------- @@ -2483,11 +2916,14 @@ Fields * "cyu": List of found CYUs in the packet * "cyu[].hash": CYU hash * "cyu[].string": CYU string +* "ja3": The JA3 fingerprint consisting of both a JA3 hash and a JA3 string +* "ja3s": The JA3S fingerprint consisting of both a JA3 hash and a JA3 string +* "ja4": The JA4 client fingerprint for QUIC Examples ~~~~~~~~ -Example of QUIC logging with a CYU hash: +Example of QUIC logging with CYU, JA3 and JA4 hashes (note that the JA4 hash is only an example to illustrate the format and does not correlate with the others): :: @@ -2499,7 +2935,12 @@ Example of QUIC logging with a CYU hash: "hash": "7b3ceb1adc974ad360cfa634e8d0a730", "string": "46,PAD-SNI-STK-SNO-VER-CCS-NONC-AEAD-UAID-SCID-TCID-PDMD-SMHL-ICSL-NONP-PUBS-MIDS-SCLS-KEXS-XLCT-CSCT-COPT-CCRT-IRTT-CFCW-SFCW" } - ] + ], + "ja3": { + "hash": "324f8c50e267adba4b5dd06c964faf67", + "string": "771,4865-4866-4867,51-43-13-27-17513-16-45-0-10-57,29-23-24," + }, + "ja4": "q13d0310h3_55b375c5d22e_cd85d2d88918" } Event type: DHCP diff --git a/doc/userguide/output/eve/eve-json-output.rst b/doc/userguide/output/eve/eve-json-output.rst index 512672f87d94..364a80418d39 100644 --- a/doc/userguide/output/eve/eve-json-output.rst +++ b/doc/userguide/output/eve/eve-json-output.rst @@ -53,6 +53,8 @@ Output types:: # enabled: yes ## set enable to yes to enable query pipelining # batch-size: 10 ## number of entry to keep in buffer +.. 
_eve-output-alert: + Alerts ~~~~~~ @@ -257,7 +259,7 @@ YAML:: extended: yes # enable this for extended logging information # custom allows to control which tls fields that are included # in eve-log - #custom: [subject, issuer, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3, ja3s] + #custom: [subject, issuer, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3, ja3s, ja4] The default is to log certificate subject and issuer. If ``extended`` is enabled, then the log gets more verbose. diff --git a/doc/userguide/partials/eve-log.yaml b/doc/userguide/partials/eve-log.yaml index 96522571e0bb..4c8e4f6704e3 100644 --- a/doc/userguide/partials/eve-log.yaml +++ b/doc/userguide/partials/eve-log.yaml @@ -133,6 +133,9 @@ outputs: # output TLS transaction where the session is resumed using a # session id #session-resumption: no + # ja4 hashes in tls records will never be logged unless + # the following is set to on. (Default off) + # ja4: off # custom allows to control which tls fields that are included # in eve-log #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain] @@ -164,6 +167,10 @@ outputs: - ike # BitTorrent DHT logging. - bittorrent-dht + - quic: + # ja4 hashes in crecords will never be logged unless + # the following is set to on. (Default off) + # ja4: off - ssh - stats: totals: yes # stats for all threads merged together diff --git a/doc/userguide/rules/base64-keywords.rst b/doc/userguide/rules/base64-keywords.rst index 7daf0c2603a9..190fdb5bf1cb 100644 --- a/doc/userguide/rules/base64-keywords.rst +++ b/doc/userguide/rules/base64-keywords.rst @@ -62,3 +62,5 @@ Example:: alert http any any -> any any (msg:"Example"; content:"somestring"; http_uri; \ base64_decode:bytes 8, offset 1, relative; \ base64_data; content:"test"; sid:10001; rev:1;) + +.. 
note:: ``fast_pattern`` is ineffective with ``base64_data`` diff --git a/doc/userguide/rules/fast-pattern-explained.rst b/doc/userguide/rules/fast-pattern-explained.rst index 5ee45e3e293c..88f0f3b33173 100644 --- a/doc/userguide/rules/fast-pattern-explained.rst +++ b/doc/userguide/rules/fast-pattern-explained.rst @@ -17,25 +17,23 @@ The fast_pattern selection criteria are as follows: #. Suricata first identifies all content matches that have the highest "priority" that are used in the signature. The priority is based - off of the buffer being matched on and generally 'http_*' buffers - have a higher priority (lower number is higher priority). See - :ref:`Appendix B ` for details - on which buffers have what priority. + off of the buffer being matched on and generally application layer buffers + have a higher priority (lower number is higher priority). The buffer + `http_method` is an exception and has lower priority than the general + `content` buffer. #. Within the content matches identified in step 1 (the highest priority content matches), the longest (in terms of character/byte length) content match is used as the fast pattern match. #. If multiple content matches have the same highest priority and qualify for the longest length, the one with the highest character/byte diversity score ("Pattern Strength") is used as the - fast pattern match. See :ref:`Appendix C - ` for details on the algorithm + fast pattern match. See :ref:`Appendix A + ` for details on the algorithm used to determine Pattern Strength. #. If multiple content matches have the same highest priority, qualify for the longest length, and the same highest Pattern Strength, the buffer ("list_id") that was *registered last* is used as the fast - pattern match. See :ref:`Appendix B - ` for the registration order of - the different buffers/lists. + pattern match. #. 
If multiple content matches have the same highest priority, qualify for the longest length, the same highest Pattern Strength, and have the same list_id (i.e. are looking in the same buffer), then the @@ -52,63 +50,7 @@ Appendices .. _fast-pattern-explained-appendix-a: -Appendix A - Buffers, list_id values, and Registration Order for Suricata 1.3.4 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This should be pretty much the same for Suricata 1.1.x - 1.4.x. - -======= ============================== ======================== ================== -list_id Content Modifier Keyword Buffer Name Registration Order -======= ============================== ======================== ================== -1 (regular content match) DETECT_SM_LIST_PMATCH 1 (first) -2 http_uri DETECT_SM_LIST_UMATCH 2 -6 http_client_body DETECT_SM_LIST_HCBDMATCH 3 -7 http_server_body DETECT_SM_LIST_HSBDMATCH 4 -8 http_header DETECT_SM_LIST_HHDMATCH 5 -9 http_raw_header DETECT_SM_LIST_HRHDMATCH 6 -10 http_method DETECT_SM_LIST_HMDMATCH 7 -11 http_cookie DETECT_SM_LIST_HCDMATCH 8 -12 http_raw_uri DETECT_SM_LIST_HRUDMATCH 9 -13 http_stat_msg DETECT_SM_LIST_HSMDMATCH 10 -14 http_stat_code DETECT_SM_LIST_HSCDMATCH 11 -15 http_user_agent DETECT_SM_LIST_HUADMATCH 12 (last) -======= ============================== ======================== ================== - -Note: registration order doesn't matter when it comes to determining the fast pattern match for Suricata 1.3.4 but list_id value does. - -.. _fast-pattern-explained-appendix-b: - -Appendix B - Buffers, list_id values, Priorities, and Registration Order for Suricata 2.0.7 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This should be pretty much the same for Suricata 2.0.x. 
- -========================================== ================== ============================== ============================= ======= -Priority (lower number is higher priority) Registration Order Content Modifier Keyword Buffer Name list_id -========================================== ================== ============================== ============================= ======= -3 11 (regular content match) DETECT_SM_LIST_PMATCH 1 -3 12 http_method DETECT_SM_LIST_HMDMATCH 12 -3 13 http_stat_code DETECT_SM_LIST_HSCDMATCH 9 -3 14 http_stat_msg DETECT_SM_LIST_HSMDMATCH 8 -2 1 (first) http_client_body DETECT_SM_LIST_HCBDMATCH 4 -2 2 http_server_body DETECT_SM_LIST_HSBDMATCH 5 -2 3 http_header DETECT_SM_LIST_HHDMATCH 6 -2 4 http_raw_header DETECT_SM_LIST_HRHDMATCH 7 -2 5 http_uri DETECT_SM_LIST_UMATCH 2 -2 6 http_raw_uri DETECT_SM_LIST_HRUDMATCH 3 -2 7 http_host DETECT_SM_LIST_HHHDMATCH 10 -2 8 http_raw_host DETECT_SM_LIST_HRHHDMATCH 11 -2 9 http_cookie DETECT_SM_LIST_HCDMATCH 13 -2 10 http_user_agent DETECT_SM_LIST_HUADMATCH 14 -2 15 (last) dns_query DETECT_SM_LIST_DNSQUERY_MATCH 20 -========================================== ================== ============================== ============================= ======= - -Note: list_id value doesn't matter when it comes to determining the -fast pattern match for Suricata 2.0.7 but registration order does. - -.. _fast-pattern-explained-appendix-c: - -Appendix C - Pattern Strength Algorithm +Appendix A - Pattern Strength Algorithm ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ From detect-engine-mpm.c. Basically the Pattern Strength "score" diff --git a/doc/userguide/rules/file-keywords.rst b/doc/userguide/rules/file-keywords.rst index a9b24deafe07..c708ee746c0d 100644 --- a/doc/userguide/rules/file-keywords.rst +++ b/doc/userguide/rules/file-keywords.rst @@ -5,6 +5,36 @@ Suricata comes with several rule keywords to match on various file properties. They depend on properly configured :doc:`../file-extraction/file-extraction`. 
+file.data +--------- + +The ``file.data`` sticky buffer matches on contents of files that are +seen in flows that Suricata evaluates. The various payload keywords can +be used (e.g. ``startswith``, ``nocase`` and ``bsize``) with ``file.data``. + +Example:: + + alert smtp any any -> any any (msg:"smtp app layer file.data example"; \ + file.data; content:"example file content"; sid:1; rev:1) + + alert http any any -> any any (msg:"http app layer file.data example"; \ + file.data; content:"example file content"; sid:2; rev:1) + + alert http2 any any -> any any (msg:"http2 app layer file.data example"; \ + file.data; content:"example file content"; sid:3; rev:1;) + + alert nfs any any -> any any (msg:"nfs app layer file.data example"; \ + file.data; content:" "; sid:5; rev:1) + + alert ftp-data any any -> any any (msg:"ftp app layer file.data example"; \ + file.data; content:"example file content"; sid:6; rev:1;) + + alert tcp any any -> any any (msg:"tcp file.data example"; \ + file.data; content:"example file content"; sid:4; rev:1) + +**Note** file_data is the legacy notation but can still be used. + + file.name --------- diff --git a/doc/userguide/rules/http-keywords.rst b/doc/userguide/rules/http-keywords.rst index 001c0f542e84..0c0f652ad397 100644 --- a/doc/userguide/rules/http-keywords.rst +++ b/doc/userguide/rules/http-keywords.rst @@ -838,7 +838,7 @@ Notes than 1k, 'content:!" -category: the category short name +``category``: the category short name -operator: <, >, = +``operator``: <, <=, >, >=, = -reputation score: 1-127 +``reputation score``: 0-127 Example: :: - alert ip $HOME_NET any -> any any (msg:"IPREP internal host talking to CnC server"; flow:to_server; iprep:dst,CnC,>,30; sid:1; rev:1;) -This rule will alert when a system in $HOME_NET acts as a client while communicating with any IP in the CnC category that has a reputation score set to greater than 30. 
+This rule will alert when a system in ``$HOME_NET`` acts as a client while communicating with any IP in the CnC category that has a reputation score set to greater than 30. + +isset and isnotset +~~~~~~~~~~~~~~~~~~ + +``isset`` and ``isnotset`` can be used to test reputation "membership" + +:: + + iprep:,, + + +``side to check``: + +``category``: the category short name + +To test whether an IP is part of an iprep set at all, the ``isset`` can be used. It acts as a ``>=,0`` statement. + +.. container:: example-rule + + drop ip $HOME_NET any -> any any (:example-rule-options:`iprep:src,known-bad-hosts,isset;` sid:1;) + +In this example traffic to any IP with a score in ``known-bad-hosts`` would be blocked. + +``isnotset`` can be used to test if an IP is not a part of the set. + +.. container:: example-rule + + drop ip $HOME_NET any -> any any (:example-rule-options:`iprep:src,trusted-hosts,isnotset;` sid:1;) + +In this example traffic for a host w/o a trust score would be blocked. IP-only ~~~~~~~ diff --git a/doc/userguide/rules/ja3-keywords.rst b/doc/userguide/rules/ja-keywords.rst similarity index 60% rename from doc/userguide/rules/ja3-keywords.rst rename to doc/userguide/rules/ja-keywords.rst index c77b9f390635..6e80efd2553e 100644 --- a/doc/userguide/rules/ja3-keywords.rst +++ b/doc/userguide/rules/ja-keywords.rst @@ -1,9 +1,16 @@ -JA3 Keywords -============ +JA3/JA4 Keywords +================ -Suricata comes with a JA3 integration (https://github.com/salesforce/ja3). JA3 is used to fingerprint TLS clients. +Suricata comes with JA3 (https://github.com/salesforce/ja3) and +JA4 (https://github.com/FoxIO-LLC/ja4) integration. +JA3 and JA4 are used to fingerprint TLS and QUIC clients. -JA3 must be enabled in the Suricata config file (set 'app-layer.protocols.tls.ja3-fingerprints' to 'yes'). +Support must be enabled in the Suricata config file (set +``app-layer.protocols.tls.ja{3,4}-fingerprints`` to ``yes``). 
If it is not +explicitly disabled (``no``), it will be enabled if a loaded rule requires it. +Note that JA3/JA4 support can also be disabled at compile time; it is possible to +use the ``requires: feature ja{3,4};`` keyword to skip rules if no JA3/JA4 support is +present. ja3.hash -------- @@ -71,3 +78,19 @@ Example:: ``ja3s.string`` is a 'sticky buffer'. ``ja3s.string`` can be used as ``fast_pattern``. + +ja4.hash +-------- + +Match on JA4 hash (e.g. ``q13d0310h3_55b375c5d22e_cd85d2d88918``). + +Example:: + + alert quic any any -> any any (msg:"match JA4 hash"; \ + ja4.hash; content:"q13d0310h3_55b375c5d22e_cd85d2d88918"; \ + sid:100001;) + +``ja4.hash`` is a 'sticky buffer'. + +``ja4.hash`` can be used as ``fast_pattern``. + diff --git a/doc/userguide/rules/meta.rst b/doc/userguide/rules/meta.rst index 06e5040e73a5..1ceb5fe834e0 100644 --- a/doc/userguide/rules/meta.rst +++ b/doc/userguide/rules/meta.rst @@ -211,3 +211,51 @@ The format is:: If the value is src_ip then the source IP in the generated event (src_ip field in JSON) is the target of the attack. If target is set to dest_ip then the target is the destination IP in the generated event. + +requires +-------- + +The ``requires`` keyword allows a rule to require specific Suricata +features to be enabled, or the Suricata version to match an +expression. Rules that do not meet the requirements will be ignored, +and Suricata will not treat them as errors. + +When parsing rules, the parser attempts to process the ``requires`` +keywords before others. This allows it to occur after keywords that +may only be present in specific versions of Suricata, as specified by +the ``requires`` statement. However, the keywords preceding it must +still adhere to the basic known formats of Suricata rules. 
+ +The format is:: + + requires: feature geoip, version >= 7.0.0 + +To require multiple features, the feature sub-keyword must be +specified multiple times:: + + requires: feature geoip, feature lua + +Alternatively, *and* expressions may be expressed like:: + + requires: version >= 7.0.4 < 8 + +and *or* expressions may be expressed with ``|`` like:: + + requires: version >= 7.0.4 < 8 | >= 8.0.3 + +to express that a rule requires version 7.0.4 or greater, but less +than 8, **OR** greater than or equal to 8.0.3. Which could be useful +if a keyword wasn't added until 7.0.4 and the 8.0.3 patch releases, as +it would not exist in 8.0.1. + +This can be extended to multiple release branches:: + + requires: version >= 7.0.10 < 8 | >= 8.0.5 < 9 | >= 9.0.3 + +If no *minor* or *patch* version component is provided, it will +default to 0. + +The ``version`` may only be specified once, if specified more than +once the rule will log an error and not be loaded. + +The ``requires`` keyword was introduced in Suricata 7.0.3 and 8.0.0. diff --git a/doc/userguide/rules/noalert.rst b/doc/userguide/rules/noalert.rst new file mode 100644 index 000000000000..51dfe5de1fa0 --- /dev/null +++ b/doc/userguide/rules/noalert.rst @@ -0,0 +1,39 @@ +Alert Keywords +============== + +.. role:: example-rule-options + +In addition to the action, alerting behavior can be controlled in the rule body using the ``noalert`` and ``alert`` keywords. +Additionally, alerting behavior is controlled by :doc:`thresholding`. + +noalert +------- + +A rule that specifies ``noalert`` will not generate an alert when it matches, but rule actions will still be performed. + +``noalert`` is often used in rules that set a ``flowbit`` for common patterns. + +``noalert`` is meant for use with rule actions ``alert``, ``drop``, ``reject`` that all explicitly or implicitly include ``alert``. + +.. 
container:: example-rule + + alert http any any -> any any (http.user_agent; content:"Mozilla/5.0"; startswith; endswith; \ + flowbits:set,mozilla-ua; :example-rule-options:`noalert;` sid:1;) + +This example sets a flowbit "mozilla-ua" on matching, but does not generate an alert due to the presence of ``noalert``. + +.. note:: this option is also used as ``flowbits:noalert;``, see :doc:`flow-keywords` + +alert +----- + +A rule that specifies ``alert`` will generate an alert, even if the rule action doesn't imply alerting. + +This keyword can be used to implement an "alert then pass"-logic. + +.. container:: example-rule + + pass http any any -> any any (http.user_agent; content:"Mozilla/5.0"; startswith; endswith; \ + :example-rule-options:`alert;` sid:1;) + +This example would pass the rest of the HTTP flow with the Mozilla/5.0 user-agent, generating an alert for the "pass" event. diff --git a/doc/userguide/rules/payload-keywords.rst b/doc/userguide/rules/payload-keywords.rst index 412f7b4fe0e4..0f0e2ba97cc8 100644 --- a/doc/userguide/rules/payload-keywords.rst +++ b/doc/userguide/rules/payload-keywords.rst @@ -282,7 +282,7 @@ precision to the content match, previously this could have been done with ``isda An optional operator can be specified; if no operator is present, the operator will default to '='. When a relational operator is used, e.g., '<', '>' or '<>' (range), -the bsize value will be compared using the relational operator. Ranges are inclusive. +the bsize value will be compared using the relational operator. Ranges are exclusive. 
If one or more ``content`` keywords precedes ``bsize``, each occurrence of ``content`` will be inspected and an error will be raised if the content length and the bsize @@ -325,6 +325,9 @@ Examples of ``bsize`` in a rule: alert dns any any -> any any (msg:"test bsize rule"; dns.query; content:"middle"; bsize:6<>15; sid:126; rev:1;) +To emphasize how range works: in the example above, a match will occur if +``bsize`` is greater than 6 and less than 15. + dsize ----- @@ -412,23 +415,23 @@ Example:: alert tcp any any -> any any \ (msg:"Byte_Test Example - Num = Value"; \ - content:"|00 01 00 02|"; byte_test:2,=,0x01;) + content:"|00 01 00 02|"; byte_test:2,=,0x01,0;) alert tcp any any -> any any \ (msg:"Byte_Test Example - Num = Value relative to content"; \ - content:"|00 01 00 02|"; byte_test:2,=,0x03,relative;) + content:"|00 01 00 02|"; byte_test:2,=,0x03,2,relative;) alert tcp any any -> any any \ (msg:"Byte_Test Example - Num != Value"; content:"|00 01 00 02|"; \ - byte_test:2,!=,0x06;) + byte_test:2,!=,0x06,0;) alert tcp any any -> any any \ (msg:"Byte_Test Example - Detect Large Values"; content:"|00 01 00 02|"; \ - byte_test:2,>,1000,relative;) + byte_test:2,>,1000,1,relative;) alert tcp any any -> any any \ (msg:"Byte_Test Example - Lowest bit is set"; \ - content:"|00 01 00 02|"; byte_test:2,&,0x01,relative;) + content:"|00 01 00 02|"; byte_test:2,&,0x01,12,relative;) alert tcp any any -> any any (msg:"Byte_Test Example - Compare to String"; \ content:"foobar"; byte_test:4,=,1337,1,relative,string,dec;) @@ -826,7 +829,7 @@ Suricata has its own specific pcre modifiers. These are: .. _pcre-update-v1-to-v2: Changes from PCRE1 to PCRE2 -=========================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~ The upgrade from PCRE1 to PCRE2 changes the behavior for some PCRE expressions. 
diff --git a/doc/userguide/rules/tag.rst b/doc/userguide/rules/tag.rst new file mode 100644 index 000000000000..1b057cb6bd81 --- /dev/null +++ b/doc/userguide/rules/tag.rst @@ -0,0 +1,133 @@ +Tag +=== + +The `tag` keyword allows tagging of the current and future packets. + +Tagged packets can be logged in `EVE` and conditional PCAP logging. + +Tagging is limited to a scope: `host` or `session` (flow). When using `host` a +direction can be specified: `src` or `dst`. Tagging will then occur based on the +`src` or `dst` IP address of the packet generating the alert. + +Tagging is further controlled by count: `packets`, `bytes` or `seconds`. If the +count is omitted, built-in defaults will be used: + +- for `session`: 256 packets +- for `host`: 256 packets for the destination IP of the packet triggering the alert + +The `tag` keyword can appear multiple times in a rule. + +Syntax +~~~~~~ + +:: + + tag:<scope>[,<count>, <metric> [,<direction>]]; + +Values for `scope`: `session` and `host` +Values for `metric`: `packets`, `bytes`, `seconds` +Values for `direction`: `src` and `dst` + +.. note:: "direction" can only be specified if scope is "host" and both "count" + and "metric" are also specified. + +Examples +~~~~~~~~ + +Keyword:: + + tag:session; # tags next 256 packets in the flow + tag:host; # tags next 256 packets for the dst ip of the alert + tag:host,100,packets,src; # tags next 100 packets for src ip of the alert + tag:host,3600,seconds,dst; # tags packets for dst host for the next hour + +Full rule examples: + +.. container:: example-rule + + alert dns any any -> any any (dns.query; content:"evil"; tag:host,60,seconds,src; sid:1;) + +.. container:: example-rule + + alert http any any -> any any (http.method; content:"POST"; tag:session; sid:1;) + +How to Use Tags +~~~~~~~~~~~~~~~ + +EVE +""" + +Tags can be set to generate `EVE` `tag` records: + +.. 
code-block:: yaml + + outputs: + - eve-log: + enabled: yes + filename: eve.json + types: + - alert: + tagged-packets: true + +The tagged packets will then be logged with `event_type`: `packet`: + +.. code-block:: json + + { + "timestamp": "2020-06-03T10:29:17.850417+0000", + "flow_id": 1576832511820424, + "event_type": "packet", + "src_ip": "192.168.0.27", + "src_port": 54634, + "dest_ip": "192.168.0.103", + "dest_port": 22, + "proto": "TCP", + "pkt_src": "wire/pcap", + "packet": "CAAn6mWJAPSNvfrHCABFAAAogkVAAIAG9rfAqAAbwKgAZ9VqABZvnJXH5Zf6aFAQEAljEwAAAAAAAAAA", + "packet_info": { + "linktype": 1 + } + } + +EVE: :ref:`Eve JSON Output ` + +Conditional PCAP Logging +"""""""""""""""""""""""" + +Using the conditional PCAP logging option the tag keyword can control which +packets are logged by the PCAP logging. + +.. code-block:: yaml + + outputs: + - pcap-log: + enabled: yes + filename: log.pcap + limit: 1000mb + max-files: 2000 + compression: none + mode: normal + use-stream-depth: no #If set to "yes" packets seen after reaching stream inspection depth are ignored. "no" logs all packets + honor-pass-rules: no # If set to "yes", flows in which a pass rule matched will stop being logged. + # Use "all" to log all packets or use "alerts" to log only alerted packets and flows or "tag" + # to log only flow tagged via the "tag" keyword + conditional: tag + +PCAP Logging: :ref:`PCAP log ` + +Tracking by Host/Flow +~~~~~~~~~~~~~~~~~~~~~ + +When the tags are using the `session` scope, the tag is added to the +`Flow` structure. If a packet has no flow, no tagging will happen. No +errors/warnings are generated for this. + +See :ref:`Flow Settings ` for managing flow +limits and resources. + +When tags are using the `host` scope, the tag is stored with a `Host` +object in the host table. The Host table size will affect effectiveness +of per host tags. + +See :ref:`Host Settings ` for managing host +table size. 
diff --git a/doc/userguide/rules/transforms.rst b/doc/userguide/rules/transforms.rst index 91ab2ef5fc9e..f730f0d2dc71 100644 --- a/doc/userguide/rules/transforms.rst +++ b/doc/userguide/rules/transforms.rst @@ -68,6 +68,18 @@ compress_whitespace Compresses all consecutive whitespace into a single space. +to_lowercase +------------ + +Converts the buffer to lowercase and passes the value on. + +This example alerts if ``http.uri`` contains ``this text has been converted to lowercase`` + +Example:: + + alert http any any -> any any (http.uri; to_lowercase; \ + content:"this text has been converted to lowercase"; sid:1;) + to_md5 ------ @@ -79,6 +91,18 @@ Example:: alert http any any -> any any (http_request_line; to_md5; \ content:"|54 A9 7A 8A B0 9C 1B 81 37 25 22 14 51 D3 F9 97|"; sid:1;) +to_uppercase +------------ + +Converts the buffer to uppercase and passes the value on. + +This example alerts if ``http.uri`` contains ``THIS TEXT HAS BEEN CONVERTED TO UPPERCASE`` + +Example:: + + alert http any any -> any any (http.uri; to_uppercase; \ + content:"THIS TEXT HAS BEEN CONVERTED TO UPPERCASE"; sid:1;) + to_sha1 --------- @@ -134,3 +158,33 @@ Example:: alert http any any -> any any (msg:"HTTP with xor"; http.uri; \ xor:"0d0ac8ff"; content:"password="; sid:1;) + +header_lowercase +---------------- + +This transform is meant for HTTP/1 and HTTP/2 header names normalization. +It lowercases the header names, while keeping the header values untouched. + +The implementation uses a state machine: +- it lowercases until it finds ``:`` +- it does not change until it finds a new line and switches back to the first state + +This example alerts for both HTTP/1 and HTTP/2 with an authorization header +Example:: + + alert http any any -> any any (msg:"HTTP authorization"; http.header_names; \ + header_lowercase; content:"authorization:"; sid:1;) + +strip_pseudo_headers +-------------------- + +This transform is meant for HTTP/1 and HTTP/2 header names normalization. 
+It strips HTTP2 pseudo-headers (names and values). + +The implementation just strips every line beginning by ``:``. + +This example alerts for both HTTP/1 and HTTP/2 with only a user agent +Example:: + + alert http any any -> any any (msg:"HTTP ua only"; http.header_names; \ + bsize:16; content:"|0d 0a|User-Agent|0d 0a 0d 0a|"; nocase; sid:1;) diff --git a/doc/userguide/upgrade.rst b/doc/userguide/upgrade.rst index 991e55ae75c1..d7b74a02754c 100644 --- a/doc/userguide/upgrade.rst +++ b/doc/userguide/upgrade.rst @@ -59,6 +59,8 @@ Security changes ` and :ref:`Datasets File Locations ` for more information. - Lua rules are now disabled by default (change also introduced in 6.0.13), see :ref:`lua-detection`. +- Support for JA4 has been added. JA4 hashes will be computed when explicitly enabled or a rule uses + `ja4.hash`. JA4 hashes are output under a restricted set of conditions (see below): Removals ~~~~~~~~ @@ -133,6 +135,12 @@ Logging changes For more information, refer to: https://redmine.openinfosecfoundation.org/issues/1275. +- JA4 hashes are output under a restricted set of conditions when JA4 is dynamically or explicitly enabled: + + - Alerts: The signature causing the alert contains the `ja4.hash` keyword + - Logs: With QUIC logs iff outputs.quic.ja4 is enabled (default off) + - Logs: With TLS logs iff outputs.tls.ja4 is enabled (default off) + Deprecations ~~~~~~~~~~~~ - Multiple "include" fields in the configuration file will now issue a diff --git a/doc/userguide/verifying-source-files.rst b/doc/userguide/verifying-source-files.rst new file mode 100644 index 000000000000..52204b20d169 --- /dev/null +++ b/doc/userguide/verifying-source-files.rst @@ -0,0 +1,86 @@ +Verifying Suricata Source Distribution Files +============================================ + +Once the Suricata release distribution file has been downloaded, the PGP +signature should be verified. 
This can be done using the GPG application and +is usually available on Linux/BSD systems without having to manually install +any additional packages. For Mac or Windows systems installation packages can +be found at ``_. + +Verification Steps +------------------ + +These verification steps are for general guidance, the exact process and +commands may vary between operating systems. + +Downloading the Signature File +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The signature file needs to be downloaded as well as the distribution file. +Both files can be found at ``_. + +Importing the OISF Signing Key +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Once both the signature file and Suricata distribution files are obtained, the +OISF signing key should be imported to the local gpg keyring. This can be done +by running the following command:: + + $ gpg --receive-keys 2BA9C98CCDF1E93A + +The above command should produce output similar to the following:: + + gpg: key 2BA9C98CCDF1E93A: public key "Open Information Security Foundation + (OISF) " imported + gpg: Total number processed: 1 + gpg: imported: 1 + +Verifying the Suricata Distribution File +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To verify the contents of the Suricata distribution file the following command +could be ran on the Suricata 7.0.5 distribution file:: + + $ gpg --verify suricata-7.0.5.tar.gz.sig suricata-7.0.5.tar.gz + +Depending on the trust level assigned to the OISF signing keys, something +similar to the following output should be seen:: + + $ gpg --verify suricata-7.0.5.tar.gz.sig suricata-7.0.5.tar.gz + gpg: Signature made Tue 23 Apr 2024 11:58:56 AM UTC + gpg: using RSA key B36FDAF2607E10E8FFA89E5E2BA9C98CCDF1E93A + gpg: checking the trustdb + gpg: marginals needed: 3 completes needed: 1 trust model: pgp + gpg: depth: 0 valid: 1 signed: 0 trust: 0-, 0q, 0n, 0m, 0f, 1u + gpg: next trustdb check due at 2025-08-06 + gpg: Good signature from "Open Information Security Foundation (OISF) + " [ultimate] + +This indicates 
a valid signature and that the signing key is trusted. + +.. note:: If output from the `--verify` command is similar to the following:: + + gpg: Signature made Tue 23 Apr 2024 11:58:56 AM UTC + gpg: using RSA key B36FDAF2607E10E8FFA89E5E2BA9C98CCDF1E93A + gpg: Can't check signature: No public key + + This indicates that the OISF signing key was not imported to the local GPG + keyring. + +.. note:: If output from the `--verify` command is similar to the following:: + + gpg: Signature made Tue 23 Apr 2024 11:58:56 AM UTC + gpg: using RSA key B36FDAF2607E10E8FFA89E5E2BA9C98CCDF1E93A + gpg: Good signature from "Open Information Security Foundation (OISF) + " [unknown] + gpg: WARNING: This key is not certified with a trusted signature! + gpg: There is no indication that the signature belongs to the owner. + Primary key fingerprint: B36F DAF2 607E 10E8 FFA8 9E5E 2BA9 C98C CDF1 E93A + + This indicates that the OISF signing key was imported and the signatures are + valid, but either the keys have not been marked as trusted OR the keys are + possibly a forgery. + + If there are questions regarding the validity of the downloaded file, the + OISF team can be reached at `security @ oisf.net` (remove the spaces between + the `@` before sending). 
\ No newline at end of file diff --git a/etc/schema.json b/etc/schema.json index 28182cb95d98..531a09afd86b 100644 --- a/etc/schema.json +++ b/etc/schema.json @@ -51,6 +51,9 @@ "icmp_type": { "type": "integer" }, + "in_iface": { + "type": "string" + }, "log_level": { "type": "string" }, @@ -561,7 +564,7 @@ "renewal_time": { "type": "integer" }, - "requested_ip":{ + "requested_ip": { "type": "string" }, "subnet_mask": { @@ -570,7 +573,7 @@ "type": { "type": "string" }, - "vendor_class_identifier":{ + "vendor_class_identifier": { "type": "string" }, "dns_servers": { @@ -1324,12 +1327,35 @@ "body_md5": { "type": "string" }, + "cc": { + "type": "array", + "minItems": 1, + "items": { + "type": "string" + } + }, "date": { "type": "string" }, "from": { "type": "string" }, + "has_exe_url": { + "type": "boolean" + }, + "has_ipv4_url": { + "type": "boolean" + }, + "has_ipv6_url": { + "type": "boolean" + }, + "received": { + "type": "array", + "minItems": 1, + "items": { + "type": "string" + } + }, "status": { "type": "string" }, @@ -1339,39 +1365,30 @@ "subject_md5": { "type": "string" }, - "x_mailer": { - "type": "string" - }, - "url": { + "to": { "type": "array", "minItems": 1, "items": { "type": "string" } }, - "attachment": { + "url": { "type": "array", "minItems": 1, "items": { "type": "string" } }, - "to": { + "x_mailer": { + "type": "string" + }, + "attachment": { "type": "array", "minItems": 1, "items": { "type": "string" } }, - "has_ipv6_url": { - "type": "boolean" - }, - "has_ipv4_url": { - "type": "boolean" - }, - "has_exe_url": { - "type": "boolean" - }, "message_id": { "type": "string" } @@ -2805,6 +2822,9 @@ "password_message": { "type": "string" }, + "process_id": { + "type": "integer" + }, "protocol_version": { "type": "string" }, @@ -2817,6 +2837,9 @@ "sasl_response": { "type": "string" }, + "secret_key": { + "type": "integer" + }, "simple_query": { "type": "string" }, @@ -3035,6 +3058,9 @@ }, "additionalProperties": false }, + "ja4": { + "type": "string" + 
}, "sni": { "type": "string" }, @@ -3691,6 +3717,20 @@ "uptime": { "type": "integer" }, + "capture": { + "type": "object", + "properties": { + "kernel_packets": { + "type": "integer" + }, + "kernel_drops": { + "type": "integer" + }, + "kernel_ifdrops": { + "type": "integer" + } + } + }, "memcap_pressure": { "type": "integer" }, @@ -4845,6 +4885,9 @@ }, "rules_failed": { "type": "integer" + }, + "rules_skipped": { + "type": "integer" } }, "additionalProperties": false @@ -5419,6 +5462,9 @@ } }, "additionalProperties": false + }, + "ja4": { + "type": "string" } }, "additionalProperties": false diff --git a/qa/validate-cp.sh b/qa/validate-cp.sh new file mode 100755 index 000000000000..a3c6b1ca8ace --- /dev/null +++ b/qa/validate-cp.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +#set -x +#set -e + +if [ $# -ne 1 ]; then + echo "call with base branch (e.g. master-5.0.x)" + exit 1; +fi + +BASE=$1 +CHECK_BRANCH="${VALIDATE_CHECK_BRANCH:-remotes/origin/master}" + +test_cherrypicked_line() { + REV=$1 + #echo "\"REV $REV\"" + + CHERRY=$(echo $REV | grep '(cherry picked from commit' | awk '{print $5}'|awk -F')' '{print $1}' || return 1) + git branch -a --contains $CHERRY | grep " $CHECK_BRANCH$" &> /dev/null + if [ "$?" -ne 0 ]; then + echo -n "ERROR $CHERRY not found in $CHECK_BRANCH" + return 1 + else + echo -n "OK " + fi +} + +for rev in $(git rev-list --reverse origin/${BASE}..HEAD); do + echo -n "COMMIT $rev: " + + GREPOP=$(git log --format=%B -n 1 $rev | grep 'cherry picked from commit') + if [ ! 
-z "$GREPOP" ]; then + while IFS= read -r line; do + test_cherrypicked_line "$line" || exit 1 + done <<< "$GREPOP" + echo + else + echo "not a cherry-pick" + fi +done diff --git a/requirements.txt b/requirements.txt index 289c0223144a..6df1358f075f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,5 +3,5 @@ # Format: # # name {repo} {branch|tag} -libhtp https://github.com/OISF/libhtp 0.5.45 -suricata-update https://github.com/OISF/suricata-update 1.3.0 +libhtp https://github.com/OISF/libhtp 0.5.x +suricata-update https://github.com/OISF/suricata-update master diff --git a/rules/http2-events.rules b/rules/http2-events.rules index 868943a77bed..413fdd652cad 100644 --- a/rules/http2-events.rules +++ b/rules/http2-events.rules @@ -19,3 +19,5 @@ alert http2 any any -> any any (msg:"SURICATA HTTP2 invalid range header"; flow: alert http2 any any -> any any (msg:"SURICATA HTTP2 variable-length integer overflow"; flow:established; app-layer-event:http2.header_integer_overflow; classtype:protocol-command-decode; sid:2290011; rev:1;) alert http2 any any -> any any (msg:"SURICATA HTTP2 too many streams"; flow:established; app-layer-event:http2.too_many_streams; classtype:protocol-command-decode; sid:2290012; rev:1;) alert http2 any any -> any any (msg:"SURICATA HTTP2 authority host mismatch"; flow:established,to_server; app-layer-event:http2.authority_host_mismatch; classtype:protocol-command-decode; sid:2290013; rev:1;) +alert http2 any any -> any any (msg:"SURICATA HTTP2 user info in uri"; flow:established,to_server; app-layer-event:http2.userinfo_in_uri; classtype:protocol-command-decode; sid:2290014; rev:1;) +alert http2 any any -> any any (msg:"SURICATA HTTP2 reassembly limit reached"; flow:established; app-layer-event:http2.reassembly_limit_reached; classtype:protocol-command-decode; sid:2290015; rev:1;) diff --git a/rust/.cargo/config.in b/rust/.cargo/config.toml.in similarity index 100% rename from rust/.cargo/config.in rename to rust/.cargo/config.toml.in 
diff --git a/rust/.gitignore b/rust/.gitignore index d8c32e41fec6..70615afaaa0a 100644 --- a/rust/.gitignore +++ b/rust/.gitignore @@ -1,7 +1,8 @@ !Cargo.toml.in !Cargo.lock.in Cargo.toml -/.cargo/config +/.cargo/config.toml +!/.cargo/config.toml.in /Cargo.lock /target /vendor diff --git a/rust/Cargo.toml.in b/rust/Cargo.toml.in index 0bac7e159e0a..7a714e1057b8 100644 --- a/rust/Cargo.toml.in +++ b/rust/Cargo.toml.in @@ -2,6 +2,7 @@ name = "suricata" version = "@PACKAGE_VERSION@" edition = "2021" +rust-version = "1.63.0" [workspace] members = [".", "./derive"] @@ -20,6 +21,8 @@ lua_int8 = ["lua"] strict = [] debug = [] debug-validate = [] +ja3 = [] +ja4 = [] [dependencies] nom7 = { version="7.0", package="nom" } @@ -40,10 +43,10 @@ aes = "~0.7.5" aes-gcm = "~0.9.4" der-parser = "~8.2.0" -kerberos-parser = { version = "~0.7.1", default_features = false } +kerberos-parser = { version = "~0.7.1", default-features = false } # Kerberos parsing still depends on der-parser 6.0.1. -der-parser6 = { package = "der-parser", version = "~6.0.1", default_features = false } +der-parser6 = { package = "der-parser", version = "~6.0.1", default-features = false } sawp-modbus = "~0.12.1" sawp = "~0.12.1" diff --git a/rust/Makefile.am b/rust/Makefile.am index 2857288fefa3..db3935af3a86 100644 --- a/rust/Makefile.am +++ b/rust/Makefile.am @@ -1,5 +1,5 @@ EXTRA_DIST = src derive \ - .cargo/config.in \ + .cargo/config.toml.in \ cbindgen.toml \ dist/rust-bindings.h \ vendor @@ -16,6 +16,14 @@ if HAVE_LUA RUST_FEATURES += lua $(LUA_INT8) endif +if HAVE_JA3 +RUST_FEATURES += ja3 +endif + +if HAVE_JA4 +RUST_FEATURES += ja4 +endif + if DEBUG RUST_FEATURES += debug endif diff --git a/rust/src/applayer.rs b/rust/src/applayer.rs index 97db321e2249..7c00af465c17 100644 --- a/rust/src/applayer.rs +++ b/rust/src/applayer.rs @@ -58,6 +58,9 @@ impl StreamSlice { self.input_len } pub fn as_slice(&self) -> &[u8] { + if self.input.is_null() && self.input_len == 0 { + return &[]; + } unsafe { 
std::slice::from_raw_parts(self.input, self.input_len as usize) } } pub fn is_empty(&self) -> bool { @@ -193,11 +196,35 @@ impl AppLayerTxData { pub fn update_file_flags(&mut self, state_flags: u16) { if (self.file_flags & state_flags) != state_flags { SCLogDebug!("updating tx file_flags {:04x} with state flags {:04x}", self.file_flags, state_flags); - self.file_flags |= state_flags; + let mut nf = state_flags; + // With keyword filestore:both,flow : + // There may be some opened unclosed file in one direction without filestore + // As such it has tx file_flags had FLOWFILE_NO_STORE_TS or TC + // But a new file in the other direction may trigger filestore:both,flow + // And thus set state_flags FLOWFILE_STORE_TS + // If the file was opened without storing it, do not try to store just the end of it + if (self.file_flags & FLOWFILE_NO_STORE_TS) != 0 && (state_flags & FLOWFILE_STORE_TS) != 0 { + nf &= !FLOWFILE_STORE_TS; + } + if (self.file_flags & FLOWFILE_NO_STORE_TC) != 0 && (state_flags & FLOWFILE_STORE_TC) != 0 { + nf &= !FLOWFILE_STORE_TC; + } + self.file_flags |= nf; } } } +// need to keep in sync with C flow.h +pub const FLOWFILE_NO_STORE_TS: u16 = BIT_U16!(2); +pub const FLOWFILE_NO_STORE_TC: u16 = BIT_U16!(3); +pub const FLOWFILE_STORE_TS: u16 = BIT_U16!(12); +pub const FLOWFILE_STORE_TC: u16 = BIT_U16!(13); + +#[no_mangle] +pub unsafe extern "C" fn SCTxDataUpdateFileFlags(txd: &mut AppLayerTxData, state_flags: u16) { + txd.update_file_flags(state_flags); +} + #[macro_export] macro_rules!export_tx_data_get { ($name:ident, $type:ty) => { diff --git a/rust/src/applayertemplate/template.rs b/rust/src/applayertemplate/template.rs index acc6c26c37fa..dbbc7841fad5 100644 --- a/rust/src/applayertemplate/template.rs +++ b/rust/src/applayertemplate/template.rs @@ -17,6 +17,7 @@ use super::parser; use crate::applayer::{self, *}; +use crate::conf::conf_get; use crate::core::{AppProto, Flow, ALPROTO_UNKNOWN, IPPROTO_TCP}; use nom7 as nom; use std; @@ -24,10 +25,14 @@ 
use std::collections::VecDeque; use std::ffi::CString; use std::os::raw::{c_char, c_int, c_void}; +static mut TEMPLATE_MAX_TX: usize = 256; + static mut ALPROTO_TEMPLATE: AppProto = ALPROTO_UNKNOWN; #[derive(AppLayerEvent)] -enum TemplateEvent {} +enum TemplateEvent { + TooManyTransactions, +} pub struct TemplateTransaction { tx_id: u64, @@ -145,7 +150,13 @@ impl TemplateState { SCLogNotice!("Request: {}", request); let mut tx = self.new_tx(); tx.request = Some(request); + if self.transactions.len() >= unsafe {TEMPLATE_MAX_TX} { + tx.tx_data.set_event(TemplateEvent::TooManyTransactions as u8); + } self.transactions.push_back(tx); + if self.transactions.len() >= unsafe {TEMPLATE_MAX_TX} { + return AppLayerResult::err(); + } } Err(nom::Err::Incomplete(_)) => { // Not enough data. This parser doesn't give us a good indication @@ -429,6 +440,13 @@ pub unsafe extern "C" fn rs_template_register_parser() { if AppLayerParserConfParserEnabled(ip_proto_str.as_ptr(), parser.name) != 0 { let _ = AppLayerRegisterParser(&parser, alproto); } + if let Some(val) = conf_get("app-layer.protocols.template.max-tx") { + if let Ok(v) = val.parse::() { + TEMPLATE_MAX_TX = v; + } else { + SCLogError!("Invalid value for template.max-tx"); + } + } SCLogNotice!("Rust template parser registered."); } else { SCLogNotice!("Protocol detector and parser disabled for TEMPLATE."); diff --git a/rust/src/asn1/mod.rs b/rust/src/asn1/mod.rs index 4b77b0ca28d5..7496d4448810 100644 --- a/rust/src/asn1/mod.rs +++ b/rust/src/asn1/mod.rs @@ -33,7 +33,7 @@ pub struct Asn1<'a>(Vec>); enum Asn1DecodeError { InvalidKeywordParameter, MaxFrames, - BerError(Err), + BerError, } /// Enumeration of Asn1 checks @@ -282,8 +282,8 @@ impl From for Asn1DecodeError { } impl From> for Asn1DecodeError { - fn from(e: Err) -> Asn1DecodeError { - Asn1DecodeError::BerError(e) + fn from(_e: Err) -> Asn1DecodeError { + Asn1DecodeError::BerError } } diff --git a/rust/src/asn1/parse_rules.rs b/rust/src/asn1/parse_rules.rs index 
540734c72fa5..9a857868db98 100644 --- a/rust/src/asn1/parse_rules.rs +++ b/rust/src/asn1/parse_rules.rs @@ -153,7 +153,7 @@ pub(super) fn asn1_parse_rule(input: &str) -> IResult<&str, DetectAsn1Data> { tag("relative_offset"), multispace1, verify(parse_i32_number, |v| { - *v >= -i32::from(std::u16::MAX) && *v <= i32::from(std::u16::MAX) + *v >= -i32::from(u16::MAX) && *v <= i32::from(u16::MAX) }), )(i) } diff --git a/rust/src/conf.rs b/rust/src/conf.rs index 50acf6cae895..0d28f4d8b494 100644 --- a/rust/src/conf.rs +++ b/rust/src/conf.rs @@ -81,13 +81,8 @@ pub fn conf_get(key: &str) -> Option<&str> { // Return the value of key as a boolean. A value that is not set is // the same as having it set to false. pub fn conf_get_bool(key: &str) -> bool { - if let Some(val) = conf_get(key) { - match val { - "1" | "yes" | "true" | "on" => { - return true; - }, - _ => {}, - } + if let Some("1" | "yes" | "true" | "on") = conf_get(key) { + return true; } return false; diff --git a/rust/src/dcerpc/dcerpc.rs b/rust/src/dcerpc/dcerpc.rs index 759d5c26343a..62b2e1164eff 100644 --- a/rust/src/dcerpc/dcerpc.rs +++ b/rust/src/dcerpc/dcerpc.rs @@ -507,8 +507,8 @@ impl DCERPCState { /// type: unsigned 32 bit integer /// description: call_id param derived from TCP Header /// * `dir`: - /// type: enum Direction - /// description: direction of the flow + /// type: enum Direction + /// description: direction of the flow /// /// Return value: /// Option mutable reference to DCERPCTransaction @@ -1293,7 +1293,7 @@ pub unsafe extern "C" fn rs_dcerpc_probe_tcp(_f: *const core::Flow, direction: u len: u32, rdir: *mut u8) -> AppProto { SCLogDebug!("Probing packet for DCERPC"); - if len == 0 { + if len == 0 || input.is_null() { return core::ALPROTO_UNKNOWN; } let slice: &[u8] = std::slice::from_raw_parts(input as *mut u8, len as usize); diff --git a/rust/src/dcerpc/dcerpc_udp.rs b/rust/src/dcerpc/dcerpc_udp.rs index 83707bddcb21..05d49973c864 100644 --- a/rust/src/dcerpc/dcerpc_udp.rs +++ 
b/rust/src/dcerpc/dcerpc_udp.rs @@ -311,7 +311,7 @@ pub unsafe extern "C" fn rs_dcerpc_probe_udp(_f: *const core::Flow, direction: u len: u32, rdir: *mut u8) -> core::AppProto { SCLogDebug!("Probing the packet for DCERPC/UDP"); - if len == 0 { + if len == 0 || input.is_null() { return core::ALPROTO_UNKNOWN; } let slice: &[u8] = std::slice::from_raw_parts(input as *mut u8, len as usize); @@ -410,11 +410,8 @@ mod tests { 0x1c, 0x7d, 0xcf, 0x11, ]; - match parser::parse_dcerpc_udp_header(request) { - Ok((_rem, _header)) => { - { assert!(false); } - } - _ => {} + if let Ok((_rem, _header)) = parser::parse_dcerpc_udp_header(request) { + { assert!(false); } } } diff --git a/rust/src/dcerpc/detect.rs b/rust/src/dcerpc/detect.rs index 81f2854ace6d..2a0209c80cc6 100644 --- a/rust/src/dcerpc/detect.rs +++ b/rust/src/dcerpc/detect.rs @@ -212,13 +212,8 @@ pub extern "C" fn rs_dcerpc_iface_match( } match state.get_hdr_type() { - Some(x) => match x { - DCERPC_TYPE_REQUEST | DCERPC_TYPE_RESPONSE => {} - _ => { - return 0; - } - }, - None => { + Some(DCERPC_TYPE_REQUEST | DCERPC_TYPE_RESPONSE) => {} + _ => { return 0; } }; diff --git a/rust/src/detect/error.rs b/rust/src/detect/error.rs index 4959e2c883a4..211c5afceb33 100644 --- a/rust/src/detect/error.rs +++ b/rust/src/detect/error.rs @@ -23,6 +23,7 @@ use nom7::error::{ErrorKind, ParseError}; #[derive(Debug, PartialEq, Eq)] pub enum RuleParseError { InvalidByteMath(String), + InvalidIPRep(String), Nom(I, ErrorKind), } diff --git a/rust/src/detect/iprep.rs b/rust/src/detect/iprep.rs index 16f5d9d5d15e..6604628edd13 100644 --- a/rust/src/detect/iprep.rs +++ b/rust/src/detect/iprep.rs @@ -16,10 +16,11 @@ */ use super::uint::*; -use nom7::bytes::complete::{is_a, take_while}; -use nom7::character::complete::{alpha0, char, digit1}; -use nom7::combinator::{all_consuming, map_opt, map_res, opt}; -use nom7::error::{make_error, ErrorKind}; +use crate::detect::error::RuleParseError; +use nom7::bytes::complete::tag; +use 
nom7::character::complete::multispace0; +use nom7::sequence::preceded; + use nom7::Err; use nom7::IResult; @@ -53,12 +54,17 @@ impl std::str::FromStr for DetectIPRepDataCmd { } } +/// value matching is done use `DetectUintData` logic. +/// isset matching is done using special `DetectUintData` value ">= 0" +/// isnotset matching bypasses `DetectUintData` and is handled directly +/// in the match function (in C). #[derive(Debug)] #[repr(C)] pub struct DetectIPRepData { pub du8: DetectUintData, pub cat: u8, pub cmd: DetectIPRepDataCmd, + pub isnotset: bool, // if true, ignores `du8` } pub fn is_alphanumeric_or_slash(chr: char) -> bool { @@ -75,36 +81,69 @@ extern "C" { pub fn SRepCatGetByShortname(name: *const c_char) -> u8; } -pub fn detect_parse_iprep(i: &str) -> IResult<&str, DetectIPRepData> { - let (i, _) = opt(is_a(" "))(i)?; - let (i, cmd) = map_res(alpha0, DetectIPRepDataCmd::from_str)(i)?; - let (i, _) = opt(is_a(" "))(i)?; - let (i, _) = char(',')(i)?; - let (i, _) = opt(is_a(" "))(i)?; - - let (i, name) = take_while(is_alphanumeric_or_slash)(i)?; - // copy as to have final zero - let namez = CString::new(name).unwrap(); - let cat = unsafe { SRepCatGetByShortname(namez.as_ptr()) }; - if cat == 0 { - return Err(Err::Error(make_error(i, ErrorKind::MapOpt))); +pub fn detect_parse_iprep(i: &str) -> IResult<&str, DetectIPRepData, RuleParseError<&str>> { + // Inner utility function for easy error creation. 
+ fn make_error(reason: String) -> nom7::Err> { + Err::Error(RuleParseError::InvalidIPRep(reason)) + } + let (_, values) = nom7::multi::separated_list1( + tag(","), + preceded(multispace0, nom7::bytes::complete::is_not(",")), + )(i)?; + + let args = values.len(); + if args == 4 || args == 3 { + let cmd = if let Ok(cmd) = DetectIPRepDataCmd::from_str(values[0].trim()) { + cmd + } else { + return Err(make_error("invalid command".to_string())); + }; + let name = values[1].trim(); + let namez = if let Ok(name) = CString::new(name) { + name + } else { + return Err(make_error("invalid name".to_string())); + }; + let cat = unsafe { SRepCatGetByShortname(namez.as_ptr()) }; + if cat == 0 { + return Err(make_error("unknown category".to_string())); + } + + if args == 4 { + let mode = match detect_parse_uint_mode(values[2].trim()) { + Ok(val) => val.1, + Err(_) => return Err(make_error("invalid mode".to_string())), + }; + + let arg1 = match values[3].trim().parse::() { + Ok(val) => val, + Err(_) => return Err(make_error("invalid value".to_string())), + }; + let du8 = DetectUintData:: { + arg1, + arg2: 0, + mode, + }; + return Ok((i, DetectIPRepData { du8, cat, cmd, isnotset: false, })); + } else { + let (isnotset, mode, arg1) = match values[2].trim() { + "isset" => { (false, DetectUintMode::DetectUintModeGte, 0) }, + "isnotset" => { (true, DetectUintMode::DetectUintModeEqual, 0) }, + _ => { return Err(make_error("invalid mode".to_string())); }, + }; + let du8 = DetectUintData:: { + arg1, + arg2: 0, + mode, + }; + return Ok((i, DetectIPRepData { du8, cat, cmd, isnotset, })); + } + } else if args < 3 { + return Err(make_error("too few arguments".to_string())); + } else { + return Err(make_error("too many arguments".to_string())); } - let (i, _) = opt(is_a(" "))(i)?; - let (i, _) = char(',')(i)?; - let (i, _) = opt(is_a(" "))(i)?; - let (i, mode) = detect_parse_uint_mode(i)?; - let (i, _) = opt(is_a(" "))(i)?; - let (i, _) = char(',')(i)?; - let (i, _) = opt(is_a(" "))(i)?; - let 
(i, arg1) = map_opt(digit1, |s: &str| s.parse::().ok())(i)?; - let (i, _) = all_consuming(take_while(|c| c == ' '))(i)?; - let du8 = DetectUintData:: { - arg1, - arg2: 0, - mode, - }; - return Ok((i, DetectIPRepData { du8, cat, cmd })); } #[no_mangle] diff --git a/rust/src/detect/mod.rs b/rust/src/detect/mod.rs index 41c7ff2455bd..d33c9ae7fabf 100644 --- a/rust/src/detect/mod.rs +++ b/rust/src/detect/mod.rs @@ -24,3 +24,4 @@ pub mod parser; pub mod stream_size; pub mod uint; pub mod uri; +pub mod requires; diff --git a/rust/src/detect/requires.rs b/rust/src/detect/requires.rs new file mode 100644 index 000000000000..e9e1acac5087 --- /dev/null +++ b/rust/src/detect/requires.rs @@ -0,0 +1,805 @@ +/* Copyright (C) 2023 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +use std::collections::{HashSet, VecDeque}; +use std::{cmp::Ordering, ffi::CStr}; + +// std::ffi::{c_char, c_int} is recommended these days, but requires +// Rust 1.64.0. 
+use std::os::raw::{c_char, c_int}; + +use nom7::bytes::complete::take_while; +use nom7::combinator::map; +use nom7::multi::{many1, separated_list1}; +use nom7::sequence::tuple; +use nom7::{ + branch::alt, + bytes::complete::{tag, take_till}, + character::complete::{char, multispace0}, + combinator::map_res, + sequence::preceded, + IResult, +}; + +#[derive(Debug, Eq, PartialEq)] +enum RequiresError { + /// Suricata is greater than the required version. + VersionGt, + + /// Suricata is less than the required version. + VersionLt(SuricataVersion), + + /// The running Suricata is missing a required feature. + MissingFeature(String), + + /// The Suricata version, of Suricata itself is bad and failed to parse. + BadSuricataVersion, + + /// The requires expression is bad and failed to parse. + BadRequires, + + /// MultipleVersions + MultipleVersions, + + /// Passed in requirements not a valid UTF-8 string. + Utf8Error, +} + +impl RequiresError { + /// Return a pointer to a C compatible constant error message. 
+ const fn c_errmsg(&self) -> *const c_char { + let msg = match self { + Self::VersionGt => "Suricata version greater than required\0", + Self::VersionLt(_) => "Suricata version less than required\0", + Self::MissingFeature(_) => "Suricata missing a required feature\0", + Self::BadSuricataVersion => "Failed to parse running Suricata version\0", + Self::BadRequires => "Failed to parse requires expression\0", + Self::MultipleVersions => "Version may only be specified once\0", + Self::Utf8Error => "Requires expression is not valid UTF-8\0", + }; + msg.as_ptr() as *const c_char + } +} + +#[derive(Clone, Debug, Eq, PartialEq)] +enum VersionCompareOp { + Gt, + Gte, + Lt, + Lte, +} + +#[derive(Debug, Clone, Eq, PartialEq)] +struct SuricataVersion { + major: u8, + minor: u8, + patch: u8, +} + +impl PartialOrd for SuricataVersion { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for SuricataVersion { + fn cmp(&self, other: &Self) -> Ordering { + match self.major.cmp(&other.major) { + Ordering::Equal => match self.minor.cmp(&other.minor) { + Ordering::Equal => self.patch.cmp(&other.patch), + other => other, + }, + other => other, + } + } +} + +impl std::fmt::Display for SuricataVersion { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}.{}.{}", self.major, self.minor, self.patch) + } +} + +impl SuricataVersion { + fn new(major: u8, minor: u8, patch: u8) -> Self { + Self { + major, + minor, + patch, + } + } +} + +/// Parse a version expression. 
+/// +/// Parse into a version expression into a nested array, for example: +/// +/// version: >= 7.0.3 < 8 | >= 8.0.3 +/// +/// would result in something like: +/// +/// [ +/// [{op: gte, version: 7.0.3}, {op:lt, version: 8}], +/// [{op: gte, version: 8.0.3}], +/// ] +fn parse_version_expression(input: &str) -> IResult<&str, Vec>> { + let sep = preceded(multispace0, tag("|")); + let inner_parser = many1(tuple((parse_op, parse_version))); + let (input, versions) = separated_list1(sep, inner_parser)(input)?; + + let versions = versions + .into_iter() + .map(|versions| { + versions + .into_iter() + .map(|(op, version)| RuleRequireVersion { op, version }) + .collect() + }) + .collect(); + + Ok((input, versions)) +} + +#[derive(Debug, Eq, PartialEq)] +struct RuleRequireVersion { + pub op: VersionCompareOp, + pub version: SuricataVersion, +} + +#[derive(Debug, Default, Eq, PartialEq)] +struct Requires { + pub features: Vec, + + /// The version expression. + /// + /// - All of the inner most must evaluate to true. + /// - To pass, any of the outer must be true. + pub version: Vec>, +} + +fn parse_op(input: &str) -> IResult<&str, VersionCompareOp> { + preceded( + multispace0, + alt(( + map(tag(">="), |_| VersionCompareOp::Gte), + map(tag(">"), |_| VersionCompareOp::Gt), + map(tag("<="), |_| VersionCompareOp::Lte), + map(tag("<"), |_| VersionCompareOp::Lt), + )), + )(input) +} + +/// Parse the next part of the version. +/// +/// That is all chars up to eof, or the next '.' or '-'. +fn parse_next_version_part(input: &str) -> IResult<&str, u8> { + map_res( + take_till(|c| c == '.' || c == '-' || c == ' '), + |s: &str| s.parse::(), + )(input) +} + +/// Parse a version string into a SuricataVersion. 
+fn parse_version(input: &str) -> IResult<&str, SuricataVersion> { + let (input, major) = preceded(multispace0, parse_next_version_part)(input)?; + let (input, minor) = if input.is_empty() || input.starts_with(' ') { + (input, 0) + } else { + preceded(char('.'), parse_next_version_part)(input)? + }; + let (input, patch) = if input.is_empty() || input.starts_with(' ') { + (input, 0) + } else { + preceded(char('.'), parse_next_version_part)(input)? + }; + + Ok((input, SuricataVersion::new(major, minor, patch))) +} + +fn parse_key_value(input: &str) -> IResult<&str, (&str, &str)> { + // Parse the keyword, any sequence of characters, numbers or "-" or "_". + let (input, key) = preceded( + multispace0, + take_while(|c: char| c.is_alphanumeric() || c == '-' || c == '_'), + )(input)?; + let (input, value) = preceded(multispace0, take_till(|c: char| c == ','))(input)?; + Ok((input, (key, value))) +} + +fn parse_requires(mut input: &str) -> Result { + let mut requires = Requires::default(); + + while !input.is_empty() { + let (rest, (keyword, value)) = + parse_key_value(input).map_err(|_| RequiresError::BadRequires)?; + match keyword { + "feature" => { + requires.features.push(value.trim().to_string()); + } + "version" => { + if !requires.version.is_empty() { + return Err(RequiresError::MultipleVersions); + } + let (_, versions) = + parse_version_expression(value).map_err(|_| RequiresError::BadRequires)?; + requires.version = versions; + } + _ => { + // Unknown keyword, allow by warn in case we extend + // this in the future. + SCLogWarning!("Unknown requires keyword: {}", keyword); + } + } + + // No consume any remaining ',' or whitespace. 
+ input = rest.trim_start_matches(|c: char| c == ',' || c.is_whitespace()); + } + Ok(requires) +} + +fn parse_suricata_version(version: &CStr) -> Result { + let version = version + .to_str() + .map_err(|_| RequiresError::BadSuricataVersion.c_errmsg())?; + let (_, version) = + parse_version(version).map_err(|_| RequiresError::BadSuricataVersion.c_errmsg())?; + Ok(version) +} + +fn check_version( + version: &RuleRequireVersion, suricata_version: &SuricataVersion, +) -> Result<(), RequiresError> { + match version.op { + VersionCompareOp::Gt => { + if suricata_version <= &version.version { + return Err(RequiresError::VersionLt(version.version.clone())); + } + } + VersionCompareOp::Gte => { + if suricata_version < &version.version { + return Err(RequiresError::VersionLt(version.version.clone())); + } + } + VersionCompareOp::Lt => { + if suricata_version >= &version.version { + return Err(RequiresError::VersionGt); + } + } + VersionCompareOp::Lte => { + if suricata_version > &version.version { + return Err(RequiresError::VersionGt); + } + } + } + Ok(()) +} + +fn check_requires( + requires: &Requires, suricata_version: &SuricataVersion, +) -> Result<(), RequiresError> { + if !requires.version.is_empty() { + let mut errs = VecDeque::new(); + let mut ok = 0; + for or_versions in &requires.version { + let mut err = None; + for version in or_versions { + if let Err(_err) = check_version(version, suricata_version) { + err = Some(_err); + break; + } + } + if let Some(err) = err { + errs.push_back(err); + } else { + ok += 1; + } + } + if ok == 0 { + return Err(errs.pop_front().unwrap()); + } + } + + for feature in &requires.features { + if !crate::feature::requires(feature) { + return Err(RequiresError::MissingFeature(feature.to_string())); + } + } + + Ok(()) +} + +/// Status object to hold required features and the latest version of +/// Suricata required. +/// +/// Full qualified name as it is exposed to C. 
+#[derive(Debug, Default)] +pub struct SCDetectRequiresStatus { + min_version: Option, + features: HashSet, + + /// Number of rules that didn't meet a feature. + feature_count: u64, + + /// Number of rules where the Suricata version wasn't new enough. + lt_count: u64, + + /// Number of rules where the Suricata version was too new. + gt_count: u64, +} + +#[no_mangle] +pub extern "C" fn SCDetectRequiresStatusNew() -> *mut SCDetectRequiresStatus { + Box::into_raw(Box::default()) +} + +#[no_mangle] +pub unsafe extern "C" fn SCDetectRequiresStatusFree(status: *mut SCDetectRequiresStatus) { + if !status.is_null() { + std::mem::drop(Box::from_raw(status)); + } +} + +#[no_mangle] +pub unsafe extern "C" fn SCDetectRequiresStatusLog( + status: &mut SCDetectRequiresStatus, suricata_version: *const c_char, tenant_id: u32, +) { + let suricata_version = CStr::from_ptr(suricata_version) + .to_str() + .unwrap_or(""); + + let mut parts = vec![]; + if status.lt_count > 0 { + let min_version = status + .min_version + .as_ref() + .map(|v| v.to_string()) + .unwrap_or_else(|| "".to_string()); + let msg = format!( + "{} {} skipped because the running Suricata version {} is less than {}", + status.lt_count, + if status.lt_count > 1 { + "rules were" + } else { + "rule was" + }, + suricata_version, + &min_version + ); + parts.push(msg); + } + if status.gt_count > 0 { + let msg = format!( + "{} {} for an older version Suricata", + status.gt_count, + if status.gt_count > 1 { + "rules were skipped as they are" + } else { + "rule was skipped as it is" + } + ); + parts.push(msg); + } + if status.feature_count > 0 { + let features = status + .features + .iter() + .map(|f| f.to_string()) + .collect::>() + .join(", "); + let msg = format!( + "{}{} {} skipped because the running Suricata version does not have feature{}: [{}]", + if tenant_id > 0 { + format!("tenant id: {} ", tenant_id) + } else { + String::new() + }, + status.feature_count, + if status.feature_count > 1 { + "rules were" + } else { + 
"rule was" + }, + if status.feature_count > 1 { "s" } else { "" }, + &features + ); + parts.push(msg); + } + + let msg = parts.join("; "); + + if status.lt_count > 0 { + SCLogNotice!("{}", &msg); + } else if status.gt_count > 0 || status.feature_count > 0 { + SCLogInfo!("{}", &msg); + } +} + +/// Parse a "requires" rule option. +/// +/// Return values: +/// * 0 - OK, rule should continue loading +/// * -1 - Error parsing the requires content +/// * -4 - Requirements not met, don't continue loading the rule, this +/// value is chosen so it can be passed back to the options parser +/// as its treated as a non-fatal silent error. +#[no_mangle] +pub unsafe extern "C" fn SCDetectCheckRequires( + requires: *const c_char, suricata_version_string: *const c_char, errstr: *mut *const c_char, + status: &mut SCDetectRequiresStatus, +) -> c_int { + // First parse the running Suricata version. + let suricata_version = match parse_suricata_version(CStr::from_ptr(suricata_version_string)) { + Ok(version) => version, + Err(err) => { + *errstr = err; + return -1; + } + }; + + let requires = match CStr::from_ptr(requires) + .to_str() + .map_err(|_| RequiresError::Utf8Error) + .and_then(parse_requires) + { + Ok(requires) => requires, + Err(err) => { + *errstr = err.c_errmsg(); + return -1; + } + }; + + match check_requires(&requires, &suricata_version) { + Ok(()) => 0, + Err(err) => { + match &err { + RequiresError::VersionLt(version) => { + if let Some(min_version) = &status.min_version { + if version > min_version { + status.min_version = Some(version.clone()); + } + } else { + status.min_version = Some(version.clone()); + } + status.lt_count += 1; + } + RequiresError::MissingFeature(feature) => { + status.features.insert(feature.to_string()); + status.feature_count += 1; + } + RequiresError::VersionGt => { + status.gt_count += 1; + } + _ => {} + } + *errstr = err.c_errmsg(); + return -4; + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_suricata_version() 
{ + // 7.1.1 < 7.1.2 + assert!(SuricataVersion::new(7, 1, 1) < SuricataVersion::new(7, 1, 2)); + + // 7.1.1 <= 7.1.2 + assert!(SuricataVersion::new(7, 1, 1) <= SuricataVersion::new(7, 1, 2)); + + // 7.1.1 <= 7.1.1 + assert!(SuricataVersion::new(7, 1, 1) <= SuricataVersion::new(7, 1, 1)); + + // NOT 7.1.1 < 7.1.1 + assert!(SuricataVersion::new(7, 1, 1) >= SuricataVersion::new(7, 1, 1)); + + // 7.3.1 < 7.22.1 + assert!(SuricataVersion::new(7, 3, 1) < SuricataVersion::new(7, 22, 1)); + + // 7.22.1 >= 7.3.4 + assert!(SuricataVersion::new(7, 22, 1) >= SuricataVersion::new(7, 3, 4)); + } + + #[test] + fn test_parse_op() { + assert_eq!(parse_op(">").unwrap().1, VersionCompareOp::Gt); + assert_eq!(parse_op(">=").unwrap().1, VersionCompareOp::Gte); + assert_eq!(parse_op("<").unwrap().1, VersionCompareOp::Lt); + assert_eq!(parse_op("<=").unwrap().1, VersionCompareOp::Lte); + + assert!(parse_op("=").is_err()); + } + + #[test] + fn test_parse_version() { + assert_eq!( + parse_version("7").unwrap().1, + SuricataVersion { + major: 7, + minor: 0, + patch: 0, + } + ); + + assert_eq!( + parse_version("7.1").unwrap().1, + SuricataVersion { + major: 7, + minor: 1, + patch: 0, + } + ); + + assert_eq!( + parse_version("7.1.2").unwrap().1, + SuricataVersion { + major: 7, + minor: 1, + patch: 2, + } + ); + + // Suricata pre-releases will have a suffix starting with a + // '-', so make sure we accept those versions as well. 
+ assert_eq!( + parse_version("8.0.0-dev").unwrap().1, + SuricataVersion { + major: 8, + minor: 0, + patch: 0, + } + ); + + assert!(parse_version("7.1.2a").is_err()); + assert!(parse_version("a").is_err()); + assert!(parse_version("777").is_err()); + assert!(parse_version("product-1").is_err()); + } + + #[test] + fn test_parse_requires() { + let requires = parse_requires(" feature geoip").unwrap(); + assert_eq!(&requires.features[0], "geoip"); + + let requires = parse_requires(" feature geoip, feature lua ").unwrap(); + assert_eq!(&requires.features[0], "geoip"); + assert_eq!(&requires.features[1], "lua"); + + let requires = parse_requires("version >=7").unwrap(); + assert_eq!( + requires, + Requires { + features: vec![], + version: vec![vec![RuleRequireVersion { + op: VersionCompareOp::Gte, + version: SuricataVersion { + major: 7, + minor: 0, + patch: 0, + } + }]], + } + ); + + let requires = parse_requires("version >= 7.1").unwrap(); + assert_eq!( + requires, + Requires { + features: vec![], + version: vec![vec![RuleRequireVersion { + op: VersionCompareOp::Gte, + version: SuricataVersion { + major: 7, + minor: 1, + patch: 0, + } + }]], + } + ); + + let requires = parse_requires("feature output::file-store, version >= 7.1.2").unwrap(); + assert_eq!( + requires, + Requires { + features: vec!["output::file-store".to_string()], + version: vec![vec![RuleRequireVersion { + op: VersionCompareOp::Gte, + version: SuricataVersion { + major: 7, + minor: 1, + patch: 2, + } + }]], + } + ); + + let requires = parse_requires("feature geoip, version >= 7.1.2 < 8").unwrap(); + assert_eq!( + requires, + Requires { + features: vec!["geoip".to_string()], + version: vec![vec![ + RuleRequireVersion { + op: VersionCompareOp::Gte, + version: SuricataVersion { + major: 7, + minor: 1, + patch: 2, + }, + }, + RuleRequireVersion { + op: VersionCompareOp::Lt, + version: SuricataVersion { + major: 8, + minor: 0, + patch: 0, + } + } + ]], + } + ); + } + + #[test] + fn test_check_requires() { + 
// Have 7.0.4, require >= 8. + let suricata_version = SuricataVersion::new(7, 0, 4); + let requires = parse_requires("version >= 8").unwrap(); + assert_eq!( + check_requires(&requires, &suricata_version), + Err(RequiresError::VersionLt(SuricataVersion { + major: 8, + minor: 0, + patch: 0, + })), + ); + + // Have 7.0.4, require 7.0.3. + let suricata_version = SuricataVersion::new(7, 0, 4); + let requires = parse_requires("version >= 7.0.3").unwrap(); + assert_eq!(check_requires(&requires, &suricata_version), Ok(())); + + // Have 8.0.0, require >= 7.0.0 and < 8.0 + let suricata_version = SuricataVersion::new(8, 0, 0); + let requires = parse_requires("version >= 7.0.0 < 8").unwrap(); + assert_eq!( + check_requires(&requires, &suricata_version), + Err(RequiresError::VersionGt) + ); + + // Have 8.0.0, require >= 7.0.0 and < 9.0 + let suricata_version = SuricataVersion::new(8, 0, 0); + let requires = parse_requires("version >= 7.0.0 < 9").unwrap(); + assert_eq!(check_requires(&requires, &suricata_version), Ok(())); + + // Require feature foobar. + let suricata_version = SuricataVersion::new(8, 0, 0); + let requires = parse_requires("feature foobar").unwrap(); + assert_eq!( + check_requires(&requires, &suricata_version), + Err(RequiresError::MissingFeature("foobar".to_string())) + ); + + // Require feature foobar, but this time we have the feature. 
+ let suricata_version = SuricataVersion::new(8, 0, 0); + let requires = parse_requires("feature true_foobar").unwrap(); + assert_eq!(check_requires(&requires, &suricata_version), Ok(())); + + let suricata_version = SuricataVersion::new(8, 0, 1); + let requires = parse_requires("version >= 7.0.3 < 8").unwrap(); + assert!(check_requires(&requires, &suricata_version).is_err()); + + let suricata_version = SuricataVersion::new(7, 0, 1); + let requires = parse_requires("version >= 7.0.3 < 8").unwrap(); + assert!(check_requires(&requires, &suricata_version).is_err()); + + let suricata_version = SuricataVersion::new(7, 0, 3); + let requires = parse_requires("version >= 7.0.3 < 8").unwrap(); + assert!(check_requires(&requires, &suricata_version).is_ok()); + + let suricata_version = SuricataVersion::new(8, 0, 3); + let requires = parse_requires("version >= 7.0.3 < 8 | >= 8.0.3").unwrap(); + assert!(check_requires(&requires, &suricata_version).is_ok()); + + let suricata_version = SuricataVersion::new(8, 0, 2); + let requires = parse_requires("version >= 7.0.3 < 8 | >= 8.0.3").unwrap(); + assert!(check_requires(&requires, &suricata_version).is_err()); + + let suricata_version = SuricataVersion::new(7, 0, 2); + let requires = parse_requires("version >= 7.0.3 < 8 | >= 8.0.3").unwrap(); + assert!(check_requires(&requires, &suricata_version).is_err()); + + let suricata_version = SuricataVersion::new(7, 0, 3); + let requires = parse_requires("version >= 7.0.3 < 8 | >= 8.0.3").unwrap(); + assert!(check_requires(&requires, &suricata_version).is_ok()); + + // Example of something that requires a fix/feature that was + // implemented in 7.0.5, 8.0.4, 9.0.3. 
+ let requires = parse_requires("version >= 7.0.5 < 8 | >= 8.0.4 < 9 | >= 9.0.3").unwrap(); + assert!(check_requires(&requires, &SuricataVersion::new(6, 0, 0)).is_err()); + assert!(check_requires(&requires, &SuricataVersion::new(7, 0, 4)).is_err()); + assert!(check_requires(&requires, &SuricataVersion::new(7, 0, 5)).is_ok()); + assert!(check_requires(&requires, &SuricataVersion::new(8, 0, 3)).is_err()); + assert!(check_requires(&requires, &SuricataVersion::new(8, 0, 4)).is_ok()); + assert!(check_requires(&requires, &SuricataVersion::new(9, 0, 2)).is_err()); + assert!(check_requires(&requires, &SuricataVersion::new(9, 0, 3)).is_ok()); + assert!(check_requires(&requires, &SuricataVersion::new(10, 0, 0)).is_ok()); + + let requires = parse_requires("version >= 8 < 9").unwrap(); + assert!(check_requires(&requires, &SuricataVersion::new(6, 0, 0)).is_err()); + assert!(check_requires(&requires, &SuricataVersion::new(7, 0, 0)).is_err()); + assert!(check_requires(&requires, &SuricataVersion::new(8, 0, 0)).is_ok()); + assert!(check_requires(&requires, &SuricataVersion::new(9, 0, 0)).is_err()); + + // Unknown keyword. 
+ let requires = parse_requires("feature lua, foo bar, version >= 7.0.3").unwrap(); + assert_eq!( + requires, + Requires { + features: vec!["lua".to_string()], + version: vec![vec![RuleRequireVersion { + op: VersionCompareOp::Gte, + version: SuricataVersion { + major: 7, + minor: 0, + patch: 3, + } + }]], + } + ); + } + + #[test] + fn test_parse_version_expression() { + let version_str = ">= 7.0.3 < 8 | >= 8.0.3"; + let (rest, versions) = parse_version_expression(version_str).unwrap(); + assert!(rest.is_empty()); + assert_eq!( + versions, + vec![ + vec![ + RuleRequireVersion { + op: VersionCompareOp::Gte, + version: SuricataVersion { + major: 7, + minor: 0, + patch: 3, + } + }, + RuleRequireVersion { + op: VersionCompareOp::Lt, + version: SuricataVersion { + major: 8, + minor: 0, + patch: 0, + } + }, + ], + vec![RuleRequireVersion { + op: VersionCompareOp::Gte, + version: SuricataVersion { + major: 8, + minor: 0, + patch: 3, + } + },], + ] + ); + } +} diff --git a/rust/src/detect/uint.rs b/rust/src/detect/uint.rs index 3d6a5baab0ca..312dad0ca96a 100644 --- a/rust/src/detect/uint.rs +++ b/rust/src/detect/uint.rs @@ -417,11 +417,8 @@ mod tests { assert!(false); } } - match detect_parse_uint::("2kb") { - Ok((_, _val)) => { - assert!(false); - } - Err(_) => {} + if let Ok((_, _val)) = detect_parse_uint::("2kb") { + assert!(false); } match detect_parse_uint::("3MB") { Ok((_, val)) => { diff --git a/rust/src/dhcp/dhcp.rs b/rust/src/dhcp/dhcp.rs index b69b675b8ce9..7f0da5526e19 100644 --- a/rust/src/dhcp/dhcp.rs +++ b/rust/src/dhcp/dhcp.rs @@ -184,7 +184,7 @@ pub unsafe extern "C" fn rs_dhcp_probing_parser(_flow: *const Flow, input_len: u32, _rdir: *mut u8) -> AppProto { - if input_len < DHCP_MIN_FRAME_LEN { + if input_len < DHCP_MIN_FRAME_LEN || input.is_null() { return ALPROTO_UNKNOWN; } diff --git a/rust/src/dhcp/logger.rs b/rust/src/dhcp/logger.rs index b29e2158ef95..a9d8d983b6cb 100644 --- a/rust/src/dhcp/logger.rs +++ b/rust/src/dhcp/logger.rs @@ -89,7 +89,7 @@ impl 
DHCPLogger { js.set_uint("id", header.txid as u64)?; js.set_string("client_mac", - &format_addr_hex(&header.clienthw.to_vec()))?; + &format_addr_hex(&header.clienthw))?; js.set_string("assigned_ip", &dns_print_addr(&header.yourip))?; if self.extended { @@ -229,7 +229,7 @@ impl DHCPLogger { fn log_opt_dns_server(&self, js: &mut JsonBuilder, option: &DHCPOptGeneric) -> Result<(), JsonError> { js.open_array("dns_servers")?; for i in 0..(option.data.len() / 4) { - let val = dns_print_addr(&option.data[(i * 4)..(i * 4) + 4].to_vec()); + let val = dns_print_addr(&option.data[(i * 4)..(i * 4) + 4]); js.append_string(&val)?; } js.close()?; @@ -239,7 +239,7 @@ impl DHCPLogger { fn log_opt_routers(&self, js: &mut JsonBuilder, option: &DHCPOptGeneric) -> Result<(), JsonError> { js.open_array("routers")?; for i in 0..(option.data.len() / 4) { - let val = dns_print_addr(&option.data[(i * 4)..(i * 4) + 4].to_vec()); + let val = dns_print_addr(&option.data[(i * 4)..(i * 4) + 4]); js.append_string(&val)?; } js.close()?; diff --git a/rust/src/dns/dns.rs b/rust/src/dns/dns.rs index 382c76ae59b5..d87577cfda09 100644 --- a/rust/src/dns/dns.rs +++ b/rust/src/dns/dns.rs @@ -555,6 +555,7 @@ impl DNSState { ); if size > 0 && cur_i.len() >= size + 2 { let msg = &cur_i[2..(size + 2)]; + sc_app_layer_parser_trigger_raw_stream_reassembly(flow, Direction::ToServer as i32); let _pdu = Frame::new( flow, &stream_slice, @@ -617,6 +618,7 @@ impl DNSState { ); if size > 0 && cur_i.len() >= size + 2 { let msg = &cur_i[2..(size + 2)]; + sc_app_layer_parser_trigger_raw_stream_reassembly(flow, Direction::ToClient as i32); let _pdu = Frame::new( flow, &stream_slice, @@ -917,7 +919,7 @@ pub unsafe extern "C" fn rs_dns_tx_get_query_rrtype( pub unsafe extern "C" fn rs_dns_probe( _flow: *const core::Flow, _dir: u8, input: *const u8, len: u32, rdir: *mut u8, ) -> AppProto { - if len == 0 || len < std::mem::size_of::() as u32 { + if input.is_null() || len < std::mem::size_of::() as u32 { return 
core::ALPROTO_UNKNOWN; } let slice: &[u8] = std::slice::from_raw_parts(input as *mut u8, len as usize); @@ -938,7 +940,7 @@ pub unsafe extern "C" fn rs_dns_probe( pub unsafe extern "C" fn rs_dns_probe_tcp( _flow: *const core::Flow, direction: u8, input: *const u8, len: u32, rdir: *mut u8, ) -> AppProto { - if len == 0 || len < std::mem::size_of::() as u32 + 2 { + if input.is_null() || len < std::mem::size_of::() as u32 + 2 { return core::ALPROTO_UNKNOWN; } let slice: &[u8] = std::slice::from_raw_parts(input as *mut u8, len as usize); diff --git a/rust/src/dns/log.rs b/rust/src/dns/log.rs index 5212b1a0da7c..86325d5f0735 100644 --- a/rust/src/dns/log.rs +++ b/rust/src/dns/log.rs @@ -368,7 +368,7 @@ pub fn dns_rcode_string(flags: u16) -> String { } /// Format bytes as an IP address string. -pub fn dns_print_addr(addr: &Vec) -> std::string::String { +pub fn dns_print_addr(addr: &[u8]) -> std::string::String { if addr.len() == 4 { return format!("{}.{}.{}.{}", addr[0], addr[1], addr[2], addr[3]); } else if addr.len() == 16 { diff --git a/rust/src/feature.rs b/rust/src/feature.rs new file mode 100644 index 000000000000..abd09669af11 --- /dev/null +++ b/rust/src/feature.rs @@ -0,0 +1,60 @@ +/* Copyright (C) 2023 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +//! Rust bindings to the "feature" API. +//! +//! 
As this feature module is a binding to a Suricata C module it is +//! not available to Rust unit tests. Instead when running Rust unit +//! tests and "mock" version is provided that will return true for any +//! feature starting with "true" and false for any other feature name. + +#[cfg(test)] +mod mock { + /// Check for a feature returning true if found. + /// + /// This a "mock" variant of `requires` that will return true for + /// any feature starting with string `true`, and false for + /// anything else. + pub fn requires(feature: &str) -> bool { + return feature.starts_with("true"); + } +} + +#[cfg(not(test))] +mod real { + use std::ffi::CString; + use std::os::raw::c_char; + + extern "C" { + fn RequiresFeature(feature: *const c_char) -> bool; + } + + /// Check for a feature returning true if found. + pub fn requires(feature: &str) -> bool { + if let Ok(feature) = CString::new(feature) { + unsafe { RequiresFeature(feature.as_ptr()) } + } else { + false + } + } +} + +#[cfg(not(test))] +pub use real::*; + +#[cfg(test)] +pub use mock::*; diff --git a/rust/src/ftp/mod.rs b/rust/src/ftp/mod.rs index 3839c9661e0d..aae5ff4afe56 100644 --- a/rust/src/ftp/mod.rs +++ b/rust/src/ftp/mod.rs @@ -56,9 +56,9 @@ pub fn ftp_active_port(i: &[u8]) -> IResult<&[u8], u16> { digit1, tag(","), ))(i)?; - let (i, part1) = verify(parse_u16, |&v| v <= std::u8::MAX as u16)(i)?; + let (i, part1) = verify(parse_u16, |&v| v <= u8::MAX as u16)(i)?; let (i, _) = tag(",")(i)?; - let (i, part2) = verify(parse_u16, |&v| v <= std::u8::MAX as u16)(i)?; + let (i, part2) = verify(parse_u16, |&v| v <= u8::MAX as u16)(i)?; Ok((i, part1 * 256 + part2)) } @@ -77,9 +77,9 @@ pub fn ftp_pasv_response(i: &[u8]) -> IResult<&[u8], u16> { digit1, tag(","), ))(i)?; - let (i, part1) = verify(getu16, |&v| v <= std::u8::MAX as u16)(i)?; + let (i, part1) = verify(getu16, |&v| v <= u8::MAX as u16)(i)?; let (i, _) = tag(",")(i)?; - let (i, part2) = verify(getu16, |&v| v <= std::u8::MAX as u16)(i)?; + let (i, part2) = 
verify(getu16, |&v| v <= u8::MAX as u16)(i)?; // may also be completed by a final point let (i, _) = tag(")")(i)?; let (i, _) = opt(complete(tag(".")))(i)?; diff --git a/rust/src/http2/detect.rs b/rust/src/http2/detect.rs index 1c595a0cb0f8..0e7cee87573d 100644 --- a/rust/src/http2/detect.rs +++ b/rust/src/http2/detect.rs @@ -23,6 +23,7 @@ use crate::core::Direction; use crate::detect::uint::{detect_match_uint, DetectUintData}; use std::ffi::CStr; use std::str::FromStr; +use std::rc::Rc; fn http2_tx_has_frametype( tx: &mut HTTP2Transaction, direction: Direction, value: u8, @@ -404,7 +405,7 @@ fn http2_frames_get_header_firstvalue<'a>( for frame in frames { if let Some(blocks) = http2_header_blocks(frame) { for block in blocks.iter() { - if block.name == name.as_bytes() { + if block.name.as_ref() == name.as_bytes() { return Ok(&block.value); } } @@ -428,15 +429,15 @@ pub fn http2_frames_get_header_value_vec( for frame in frames { if let Some(blocks) = http2_header_blocks(frame) { for block in blocks.iter() { - if block.name == name.as_bytes() { + if block.name.as_ref() == name.as_bytes() { if found == 0 { vec.extend_from_slice(&block.value); found = 1; - } else if found == 1 { + } else if found == 1 && Rc::strong_count(&block.name) <= 2 { vec.extend_from_slice(&[b',', b' ']); vec.extend_from_slice(&block.value); found = 2; - } else { + } else if Rc::strong_count(&block.name) <= 2 { vec.extend_from_slice(&[b',', b' ']); vec.extend_from_slice(&block.value); } @@ -465,18 +466,18 @@ fn http2_frames_get_header_value<'a>( for frame in frames { if let Some(blocks) = http2_header_blocks(frame) { for block in blocks.iter() { - if block.name == name.as_bytes() { + if block.name.as_ref() == name.as_bytes() { if found == 0 { single = Ok(&block.value); found = 1; - } else if found == 1 { + } else if found == 1 && Rc::strong_count(&block.name) <= 2 { if let Ok(s) = single { vec.extend_from_slice(s); } vec.extend_from_slice(&[b',', b' ']); vec.extend_from_slice(&block.value); 
found = 2; - } else { + } else if Rc::strong_count(&block.name) <= 2 { vec.extend_from_slice(&[b',', b' ']); vec.extend_from_slice(&block.value); } @@ -545,7 +546,7 @@ fn http2_tx_get_resp_line(tx: &mut HTTP2Transaction) { } else { &empty }; - resp_line.extend(b" HTTP/2 "); + resp_line.extend(b"HTTP/2 "); resp_line.extend(status); resp_line.extend(b"\r\n"); tx.resp_line.extend(resp_line) @@ -920,8 +921,8 @@ fn http2_tx_set_header(state: &mut HTTP2State, name: &[u8], input: &[u8]) { }; let mut blocks = Vec::new(); let b = parser::HTTP2FrameHeaderBlock { - name: name.to_vec(), - value: input.to_vec(), + name: Rc::new(name.to_vec()), + value: Rc::new(input.to_vec()), error: parser::HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess, sizeupdate: 0, }; @@ -932,7 +933,7 @@ fn http2_tx_set_header(state: &mut HTTP2State, name: &[u8], input: &[u8]) { blocks, }; let txdata = HTTP2FrameTypeData::HEADERS(hs); - let tx = state.find_or_create_tx(&head, &txdata, Direction::ToServer); + let tx = state.find_or_create_tx(&head, &txdata, Direction::ToServer).unwrap(); tx.frames_ts.push(HTTP2Frame { header: head, data: txdata, @@ -975,7 +976,7 @@ fn http2_tx_set_settings(state: &mut HTTP2State, input: &[u8]) { match parser::http2_parse_frame_settings(&dec) { Ok((_, set)) => { let txdata = HTTP2FrameTypeData::SETTINGS(set); - let tx = state.find_or_create_tx(&head, &txdata, Direction::ToServer); + let tx = state.find_or_create_tx(&head, &txdata, Direction::ToServer).unwrap(); tx.frames_ts.push(HTTP2Frame { header: head, data: txdata, @@ -1063,15 +1064,15 @@ mod tests { }; let mut blocks = Vec::new(); let b = parser::HTTP2FrameHeaderBlock { - name: "Host".as_bytes().to_vec(), - value: "abc.com".as_bytes().to_vec(), + name: "Host".as_bytes().to_vec().into(), + value: "abc.com".as_bytes().to_vec().into(), error: parser::HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess, sizeupdate: 0, }; blocks.push(b); let b2 = parser::HTTP2FrameHeaderBlock { - name: "Host".as_bytes().to_vec(), - value: 
"efg.net".as_bytes().to_vec(), + name: "Host".as_bytes().to_vec().into(), + value: "efg.net".as_bytes().to_vec().into(), error: parser::HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess, sizeupdate: 0, }; diff --git a/rust/src/http2/http2.rs b/rust/src/http2/http2.rs index bbaeddb40434..23aaf261ff76 100644 --- a/rust/src/http2/http2.rs +++ b/rust/src/http2/http2.rs @@ -61,6 +61,8 @@ const HTTP2_FRAME_RSTSTREAM_LEN: usize = 4; const HTTP2_FRAME_PRIORITY_LEN: usize = 5; const HTTP2_FRAME_WINDOWUPDATE_LEN: usize = 4; pub static mut HTTP2_MAX_TABLESIZE: u32 = 65536; // 0x10000 +// maximum size of reassembly for header + continuation +static mut HTTP2_MAX_REASS: usize = 102400; static mut HTTP2_MAX_STREAMS: usize = 4096; // 0x1000 #[repr(u8)] @@ -206,10 +208,15 @@ impl HTTP2Transaction { let mut authority = None; let mut host = None; for block in blocks { - if block.name == b"content-encoding" { + if block.name.as_ref() == b"content-encoding" { self.decoder.http2_encoding_fromvec(&block.value, dir); } else if block.name.eq_ignore_ascii_case(b":authority") { authority = Some(&block.value); + if block.value.iter().any(|&x| x == b'@') { + // it is forbidden by RFC 9113 to have userinfo in this field + // when in HTTP1 we can have user:password@domain.com + self.set_event(HTTP2Event::UserinfoInUri); + } } else if block.name.eq_ignore_ascii_case(b"host") { host = Some(&block.value); } @@ -400,6 +407,8 @@ pub enum HTTP2Event { HeaderIntegerOverflow, TooManyStreams, AuthorityHostMismatch, + UserinfoInUri, + ReassemblyLimitReached, } pub struct HTTP2DynTable { @@ -426,6 +435,12 @@ impl HTTP2DynTable { } } +#[derive(Default)] +struct HTTP2HeaderReassemblyBuffer { + data: Vec, + stream_id: u32, +} + pub struct HTTP2State { state_data: AppLayerStateData, tx_id: u64, @@ -435,6 +450,9 @@ pub struct HTTP2State { dynamic_headers_tc: HTTP2DynTable, transactions: VecDeque, progress: HTTP2ConnectionState, + + c2s_buf: HTTP2HeaderReassemblyBuffer, + s2c_buf: HTTP2HeaderReassemblyBuffer, } 
impl State for HTTP2State { @@ -467,6 +485,8 @@ impl HTTP2State { dynamic_headers_tc: HTTP2DynTable::new(), transactions: VecDeque::new(), progress: HTTP2ConnectionState::Http2StateInit, + c2s_buf: HTTP2HeaderReassemblyBuffer::default(), + s2c_buf: HTTP2HeaderReassemblyBuffer::default(), } } @@ -591,9 +611,21 @@ impl HTTP2State { pub fn find_or_create_tx( &mut self, header: &parser::HTTP2FrameHeader, data: &HTTP2FrameTypeData, dir: Direction, - ) -> &mut HTTP2Transaction { + ) -> Option<&mut HTTP2Transaction> { if header.stream_id == 0 { - return self.create_global_tx(); + if self.transactions.len() >= unsafe { HTTP2_MAX_STREAMS } { + for tx_old in &mut self.transactions { + if tx_old.state == HTTP2TransactionState::HTTP2StateTodrop { + // loop was already run + break; + } + tx_old.set_event(HTTP2Event::TooManyStreams); + // use a distinct state, even if we do not log it + tx_old.state = HTTP2TransactionState::HTTP2StateTodrop; + } + return None; + } + return Some(self.create_global_tx()); } let sid = match data { //yes, the right stream_id for Suricata is not the header one @@ -623,30 +655,31 @@ impl HTTP2State { let tx = &mut self.transactions[index - 1]; tx.tx_data.update_file_flags(self.state_data.file_flags); tx.update_file_flags(tx.tx_data.file_flags); - return tx; + return Some(tx); } else { - let mut tx = HTTP2Transaction::new(); - self.tx_id += 1; - tx.tx_id = self.tx_id; - tx.stream_id = sid; - tx.state = HTTP2TransactionState::HTTP2StateOpen; // do not use SETTINGS_MAX_CONCURRENT_STREAMS as it can grow too much - if self.transactions.len() > unsafe { HTTP2_MAX_STREAMS } { - // set at least one another transaction to the drop state + if self.transactions.len() >= unsafe { HTTP2_MAX_STREAMS } { for tx_old in &mut self.transactions { - if tx_old.state != HTTP2TransactionState::HTTP2StateTodrop { - // use a distinct state, even if we do not log it - tx_old.set_event(HTTP2Event::TooManyStreams); - tx_old.state = HTTP2TransactionState::HTTP2StateTodrop; + if 
tx_old.state == HTTP2TransactionState::HTTP2StateTodrop { + // loop was already run break; } + tx_old.set_event(HTTP2Event::TooManyStreams); + // use a distinct state, even if we do not log it + tx_old.state = HTTP2TransactionState::HTTP2StateTodrop; } + return None; } + let mut tx = HTTP2Transaction::new(); + self.tx_id += 1; + tx.tx_id = self.tx_id; + tx.stream_id = sid; + tx.state = HTTP2TransactionState::HTTP2StateOpen; tx.tx_data.update_file_flags(self.state_data.file_flags); tx.update_file_flags(tx.tx_data.file_flags); tx.tx_data.file_tx = STREAM_TOSERVER|STREAM_TOCLIENT; // might hold files in both directions self.transactions.push_back(tx); - return self.transactions.back_mut().unwrap(); + return Some(self.transactions.back_mut().unwrap()); } } @@ -680,8 +713,11 @@ impl HTTP2State { } fn parse_frame_data( - &mut self, ftype: u8, input: &[u8], complete: bool, hflags: u8, dir: Direction, + &mut self, head: &parser::HTTP2FrameHeader, input: &[u8], complete: bool, dir: Direction, + reass_limit_reached: &mut bool, ) -> HTTP2FrameTypeData { + let ftype = head.ftype; + let hflags = head.flags; match num::FromPrimitive::from_u8(ftype) { Some(parser::HTTP2FrameType::GoAway) => { if input.len() < HTTP2_FRAME_GOAWAY_LEN { @@ -841,17 +877,47 @@ impl HTTP2State { return HTTP2FrameTypeData::DATA; } Some(parser::HTTP2FrameType::Continuation) => { + let buf = if dir == Direction::ToClient { + &mut self.s2c_buf + } else { + &mut self.c2s_buf + }; + if head.stream_id == buf.stream_id { + let max_reass = unsafe { HTTP2_MAX_REASS }; + if buf.data.len() + input.len() < max_reass { + buf.data.extend(input); + } else if buf.data.len() < max_reass { + buf.data.extend(&input[..max_reass - buf.data.len()]); + *reass_limit_reached = true; + } + if head.flags & parser::HTTP2_FLAG_HEADER_END_HEADERS == 0 { + let hs = parser::HTTP2FrameContinuation { + blocks: Vec::new(), + }; + return HTTP2FrameTypeData::CONTINUATION(hs); + } + } // else try to parse anyways + let input_reass = if 
head.stream_id == buf.stream_id { &buf.data } else { input }; + let dyn_headers = if dir == Direction::ToClient { &mut self.dynamic_headers_tc } else { &mut self.dynamic_headers_ts }; - match parser::http2_parse_frame_continuation(input, dyn_headers) { + match parser::http2_parse_frame_continuation(input_reass, dyn_headers) { Ok((_, hs)) => { + if head.stream_id == buf.stream_id { + buf.stream_id = 0; + buf.data.clear(); + } self.process_headers(&hs.blocks, dir); return HTTP2FrameTypeData::CONTINUATION(hs); } Err(Err::Incomplete(_)) => { + if head.stream_id == buf.stream_id { + buf.stream_id = 0; + buf.data.clear(); + } if complete { self.set_event(HTTP2Event::InvalidFrameData); return HTTP2FrameTypeData::UNHANDLED(HTTP2FrameUnhandled { @@ -864,6 +930,10 @@ impl HTTP2State { } } Err(_) => { + if head.stream_id == buf.stream_id { + buf.stream_id = 0; + buf.data.clear(); + } self.set_event(HTTP2Event::InvalidFrameData); return HTTP2FrameTypeData::UNHANDLED(HTTP2FrameUnhandled { reason: HTTP2FrameUnhandledReason::ParsingError, @@ -872,6 +942,22 @@ impl HTTP2State { } } Some(parser::HTTP2FrameType::Headers) => { + if head.flags & parser::HTTP2_FLAG_HEADER_END_HEADERS == 0 { + let buf = if dir == Direction::ToClient { + &mut self.s2c_buf + } else { + &mut self.c2s_buf + }; + buf.data.clear(); + buf.data.extend(input); + buf.stream_id = head.stream_id; + let hs = parser::HTTP2FrameHeaders { + padlength: None, + priority: None, + blocks: Vec::new(), + }; + return HTTP2FrameTypeData::HEADERS(hs); + } let dyn_headers = if dir == Direction::ToClient { &mut self.dynamic_headers_tc } else { @@ -955,15 +1041,23 @@ impl HTTP2State { input = &rem[hlsafe..]; continue; } + let mut reass_limit_reached = false; let txdata = self.parse_frame_data( - head.ftype, + &head, &rem[..hlsafe], complete, - head.flags, dir, + &mut reass_limit_reached, ); let tx = self.find_or_create_tx(&head, &txdata, dir); + if tx.is_none() { + return AppLayerResult::err(); + } + let tx = tx.unwrap(); + if 
reass_limit_reached { + tx.tx_data.set_event(HTTP2Event::ReassemblyLimitReached as u8); + } tx.handle_frame(&head, &txdata, dir); let over = head.flags & parser::HTTP2_FLAG_HEADER_EOS != 0; let ftype = head.ftype; @@ -1300,6 +1394,13 @@ pub unsafe extern "C" fn rs_http2_register_parser() { SCLogError!("Invalid value for http2.max-table-size"); } } + if let Some(val) = conf_get("app-layer.protocols.http2.max-reassembly-size") { + if let Ok(v) = val.parse::() { + HTTP2_MAX_REASS = v as usize; + } else { + SCLogError!("Invalid value for http2.max-reassembly-size"); + } + } SCLogDebug!("Rust http2 parser registered."); } else { SCLogNotice!("Protocol detector and parser disabled for HTTP2."); diff --git a/rust/src/http2/logger.rs b/rust/src/http2/logger.rs index d25f852c43ab..a117a5462214 100644 --- a/rust/src/http2/logger.rs +++ b/rust/src/http2/logger.rs @@ -19,7 +19,8 @@ use super::http2::{HTTP2Frame, HTTP2FrameTypeData, HTTP2Transaction}; use super::parser; use crate::jsonbuilder::{JsonBuilder, JsonError}; use std; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; +use std::rc::Rc; #[derive(Hash, PartialEq, Eq, Debug)] enum HeaderName { @@ -35,10 +36,20 @@ fn log_http2_headers<'a>( blocks: &'a [parser::HTTP2FrameHeaderBlock], js: &mut JsonBuilder, common: &mut HashMap>, ) -> Result<(), JsonError> { + let mut logged_headers = HashSet::new(); for block in blocks { - js.start_object()?; + // delay js.start_object() because we skip suplicate headers match block.error { parser::HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess => { + if Rc::strong_count(&block.name) > 2 { + // more than one reference in headers table + current headers + let ptr = Rc::as_ptr(&block.name) as usize; + if !logged_headers.insert(ptr) { + // only log once + continue; + } + } + js.start_object()?; js.set_string_from_bytes("name", &block.name)?; js.set_string_from_bytes("value", &block.value)?; if let Ok(name) = std::str::from_utf8(&block.name) { @@ -66,9 +77,11 @@ fn 
log_http2_headers<'a>( } } parser::HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSizeUpdate => { + js.start_object()?; js.set_uint("table_size_update", block.sizeupdate)?; } _ => { + js.start_object()?; js.set_string("error", &block.error.to_string())?; } } diff --git a/rust/src/http2/parser.rs b/rust/src/http2/parser.rs index adabeb28c6e4..f1850511268d 100644 --- a/rust/src/http2/parser.rs +++ b/rust/src/http2/parser.rs @@ -30,6 +30,7 @@ use nom7::sequence::tuple; use nom7::{Err, IResult}; use std::fmt; use std::str::FromStr; +use std::rc::Rc; #[repr(u8)] #[derive(Clone, Copy, PartialEq, Eq, FromPrimitive, Debug)] @@ -295,8 +296,8 @@ fn http2_frame_header_static(n: u64, dyn_headers: &HTTP2DynTable) -> Option Option, - pub value: Vec, + // Use Rc reference counted so that indexed headers do not get copied. + // Otherwise, this leads to quadratic complexity in memory occupation. + pub name: Rc>, + pub value: Rc>, pub error: HTTP2HeaderDecodeStatus, pub sizeupdate: u64, } @@ -391,7 +394,7 @@ fn http2_parse_headers_block_literal_common<'a>( ) -> IResult<&'a [u8], HTTP2FrameHeaderBlock> { let (i3, name, error) = if index == 0 { match http2_parse_headers_block_string(input) { - Ok((r, n)) => Ok((r, n, HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess)), + Ok((r, n)) => Ok((r, Rc::new(n), HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess)), Err(e) => Err(e), } } else { @@ -403,7 +406,7 @@ fn http2_parse_headers_block_literal_common<'a>( )), None => Ok(( input, - Vec::new(), + Rc::new(Vec::new()), HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeNotIndexed, )), } @@ -413,7 +416,7 @@ fn http2_parse_headers_block_literal_common<'a>( i4, HTTP2FrameHeaderBlock { name, - value, + value: Rc::new(value), error, sizeupdate: 0, }, @@ -435,8 +438,8 @@ fn http2_parse_headers_block_literal_incindex<'a>( match r { Ok((r, head)) => { let headcopy = HTTP2FrameHeaderBlock { - name: head.name.to_vec(), - value: head.value.to_vec(), + name: head.name.clone(), + value: head.value.clone(), error: 
head.error, sizeupdate: 0, }; @@ -556,8 +559,8 @@ fn http2_parse_headers_block_dynamic_size<'a>( return Ok(( i3, HTTP2FrameHeaderBlock { - name: Vec::new(), - value: Vec::new(), + name: Rc::new(Vec::new()), + value: Rc::new(Vec::new()), error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSizeUpdate, sizeupdate: maxsize2, }, @@ -614,8 +617,8 @@ fn http2_parse_headers_blocks<'a>( // if we error from http2_parse_var_uint, we keep the first parsed headers if err.code == ErrorKind::LengthValue { blocks.push(HTTP2FrameHeaderBlock { - name: Vec::new(), - value: Vec::new(), + name: Rc::new(Vec::new()), + value: Rc::new(Vec::new()), error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeIntegerOverflow, sizeupdate: 0, }); @@ -765,8 +768,8 @@ mod tests { match r0 { Ok((remainder, hd)) => { // Check the first message. - assert_eq!(hd.name, ":method".as_bytes().to_vec()); - assert_eq!(hd.value, "GET".as_bytes().to_vec()); + assert_eq!(hd.name, ":method".as_bytes().to_vec().into()); + assert_eq!(hd.value, "GET".as_bytes().to_vec().into()); // And we should have no bytes left. assert_eq!(remainder.len(), 0); } @@ -782,8 +785,8 @@ mod tests { match r1 { Ok((remainder, hd)) => { // Check the first message. - assert_eq!(hd.name, "accept".as_bytes().to_vec()); - assert_eq!(hd.value, "*/*".as_bytes().to_vec()); + assert_eq!(hd.name, "accept".as_bytes().to_vec().into()); + assert_eq!(hd.value, "*/*".as_bytes().to_vec().into()); // And we should have no bytes left. assert_eq!(remainder.len(), 0); assert_eq!(dynh.table.len(), 1); @@ -802,8 +805,8 @@ mod tests { match result { Ok((remainder, hd)) => { // Check the first message. - assert_eq!(hd.name, ":authority".as_bytes().to_vec()); - assert_eq!(hd.value, "localhost:3000".as_bytes().to_vec()); + assert_eq!(hd.name, ":authority".as_bytes().to_vec().into()); + assert_eq!(hd.value, "localhost:3000".as_bytes().to_vec().into()); // And we should have no bytes left. 
assert_eq!(remainder.len(), 0); assert_eq!(dynh.table.len(), 2); @@ -820,8 +823,8 @@ mod tests { match r3 { Ok((remainder, hd)) => { // same as before - assert_eq!(hd.name, ":authority".as_bytes().to_vec()); - assert_eq!(hd.value, "localhost:3000".as_bytes().to_vec()); + assert_eq!(hd.name, ":authority".as_bytes().to_vec().into()); + assert_eq!(hd.value, "localhost:3000".as_bytes().to_vec().into()); // And we should have no bytes left. assert_eq!(remainder.len(), 0); assert_eq!(dynh.table.len(), 2); @@ -856,8 +859,8 @@ mod tests { match r2 { Ok((remainder, hd)) => { // Check the first message. - assert_eq!(hd.name, ":path".as_bytes().to_vec()); - assert_eq!(hd.value, "/doc/manual/html/index.html".as_bytes().to_vec()); + assert_eq!(hd.name, ":path".as_bytes().to_vec().into()); + assert_eq!(hd.value, "/doc/manual/html/index.html".as_bytes().to_vec().into()); // And we should have no bytes left. assert_eq!(remainder.len(), 0); assert_eq!(dynh.table.len(), 2); @@ -879,11 +882,8 @@ mod tests { match r { Ok((rem, ctx)) => { assert_eq!(ctx.id, HTTP2SettingsId::EnablePush); - match ctx.value { - Some(_) => { - panic!("Unexpected value"); - } - None => {} + if ctx.value.is_some() { + panic!("Unexpected value"); } assert_eq!(rem.len(), 0); } diff --git a/rust/src/ike/ikev1.rs b/rust/src/ike/ikev1.rs index 1e79c293cdea..6f598f9806f6 100644 --- a/rust/src/ike/ikev1.rs +++ b/rust/src/ike/ikev1.rs @@ -53,7 +53,7 @@ impl Ikev1ParticipantData { } pub fn update( - &mut self, key_exchange: &str, nonce: &str, transforms: &Vec>, + &mut self, key_exchange: &str, nonce: &str, transforms: &[Vec], ) { self.key_exchange = key_exchange.to_string(); self.nonce = nonce.to_string(); diff --git a/rust/src/ike/parser.rs b/rust/src/ike/parser.rs index dcc574534290..3ec5be04b914 100644 --- a/rust/src/ike/parser.rs +++ b/rust/src/ike/parser.rs @@ -69,8 +69,8 @@ pub struct IsakmpHeader { pub struct IsakmpPayloadHeader { pub next_payload: u8, - pub reserved: u8, - pub payload_length: u16, + pub 
_reserved: u8, + pub _payload_length: u16, } pub struct IsakmpPayload<'a> { @@ -83,24 +83,24 @@ pub struct IsakmpPayload<'a> { // 1 -> Security Association pub struct SecurityAssociationPayload<'a> { pub domain_of_interpretation: u32, - pub situation: Option<&'a [u8]>, + pub _situation: Option<&'a [u8]>, pub data: Option<&'a [u8]>, } // 2 -> Proposal pub struct ProposalPayload<'a> { - pub proposal_number: u8, - pub proposal_type: u8, - pub spi_size: u8, - pub number_transforms: u8, - pub spi: &'a [u8], + pub _proposal_number: u8, + pub _proposal_type: u8, + pub _spi_size: u8, + pub _number_transforms: u8, + pub _spi: &'a [u8], pub data: &'a [u8], } // 3 -> Transform pub struct TransformPayload<'a> { - pub transform_number: u8, - pub transform_type: u8, + pub _transform_number: u8, + pub _transform_type: u8, pub sa_attributes: &'a [u8], } @@ -286,7 +286,7 @@ pub fn parse_security_association(i: &[u8]) -> IResult<&[u8], SecurityAssociatio i, SecurityAssociationPayload { domain_of_interpretation, - situation, + _situation: situation, data, }, )) @@ -308,11 +308,11 @@ pub fn parse_proposal(i: &[u8]) -> IResult<&[u8], ProposalPayload> { take((start_i.len() - 4) - spi_size as usize)(b) })(i)?; let payload = ProposalPayload { - proposal_number, - proposal_type, - spi_size, - number_transforms, - spi, + _proposal_number: proposal_number, + _proposal_type: proposal_type, + _spi_size: spi_size, + _number_transforms: number_transforms, + _spi: spi, data: payload_data.unwrap_or_default(), }; Ok((i, payload)) @@ -326,8 +326,8 @@ pub fn parse_transform(i: &[u8], length: u16) -> IResult<&[u8], TransformPayload Ok(( i, TransformPayload { - transform_number, - transform_type, + _transform_number: transform_number, + _transform_type: transform_type, sa_attributes: payload_data.unwrap_or_default(), }, )) @@ -495,8 +495,8 @@ pub fn parse_ikev1_payload_list(i: &[u8]) -> IResult<&[u8], Vec> IsakmpPayload { payload_header: IsakmpPayloadHeader { next_payload, - reserved, - payload_length, 
+ _reserved: reserved, + _payload_length: payload_length, }, data: payload_data.unwrap_or_default(), }, diff --git a/rust/src/ja4.rs b/rust/src/ja4.rs new file mode 100644 index 000000000000..883c00e8c3ca --- /dev/null +++ b/rust/src/ja4.rs @@ -0,0 +1,385 @@ +/* Copyright (C) 2023-2024 Open Information Security Foundation +* +* You can copy, redistribute or modify this Program under the terms of +* the GNU General Public License version 2 as published by the Free +* Software Foundation. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* version 2 along with this program; if not, write to the Free Software +* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +* 02110-1301, USA. + +// Author: Sascha Steinbiss + +*/ + +#[cfg(feature = "ja4")] +use digest::Digest; +use libc::c_uchar; +#[cfg(feature = "ja4")] +use sha2::Sha256; +#[cfg(feature = "ja4")] +use std::cmp::min; +use std::os::raw::c_char; +use tls_parser::{TlsCipherSuiteID, TlsExtensionType, TlsVersion}; + +#[derive(Debug, PartialEq)] +pub struct JA4 { + tls_version: Option, + ciphersuites: Vec, + extensions: Vec, + signature_algorithms: Vec, + domain: bool, + alpn: [char; 2], + quic: bool, + // Some extensions contribute to the total count component of the + // fingerprint, yet are not to be included in the SHA256 hash component. + // Let's track the count separately. + nof_exts: u16, +} + +impl Default for JA4 { + fn default() -> Self { + Self::new() + } +} + +// Stubs for when JA4 is disabled +#[cfg(not(feature = "ja4"))] +impl JA4 { + pub fn new() -> Self { + Self { + tls_version: None, + // Vec::new() does not allocate memory until filled, which we + // will not do here. 
+ ciphersuites: Vec::new(), + extensions: Vec::new(), + signature_algorithms: Vec::new(), + domain: false, + alpn: ['0', '0'], + quic: false, + nof_exts: 0, + } + } + pub fn set_quic(&mut self) {} + pub fn set_tls_version(&mut self, _version: TlsVersion) {} + pub fn set_alpn(&mut self, _alpn: &[u8]) {} + pub fn add_cipher_suite(&mut self, _cipher: TlsCipherSuiteID) {} + pub fn add_extension(&mut self, _ext: TlsExtensionType) {} + pub fn add_signature_algorithm(&mut self, _sigalgo: u16) {} + pub fn get_hash(&self) -> String { + String::new() + } +} + +#[cfg(feature = "ja4")] +impl JA4 { + #[inline] + fn is_grease(val: u16) -> bool { + match val { + 0x0a0a | 0x1a1a | 0x2a2a | 0x3a3a | 0x4a4a | 0x5a5a | 0x6a6a | 0x7a7a | 0x8a8a + | 0x9a9a | 0xaaaa | 0xbaba | 0xcaca | 0xdada | 0xeaea | 0xfafa => true, + _ => false, + } + } + + #[inline] + fn version_to_ja4code(val: Option) -> &'static str { + match val { + Some(TlsVersion::Tls13) => "13", + Some(TlsVersion::Tls12) => "12", + Some(TlsVersion::Tls11) => "11", + Some(TlsVersion::Tls10) => "10", + Some(TlsVersion::Ssl30) => "s3", + // the TLS parser does not support SSL 1.0 and 2.0 hence no + // support for "s1"/"s2" + _ => "00", + } + } + + pub fn new() -> Self { + Self { + tls_version: None, + ciphersuites: Vec::with_capacity(20), + extensions: Vec::with_capacity(20), + signature_algorithms: Vec::with_capacity(20), + domain: false, + alpn: ['0', '0'], + quic: false, + nof_exts: 0, + } + } + + pub fn set_quic(&mut self) { + self.quic = true; + } + + pub fn set_tls_version(&mut self, version: TlsVersion) { + if JA4::is_grease(u16::from(version)) { + return; + } + // Track maximum of seen TLS versions + match self.tls_version { + None => { + self.tls_version = Some(version); + } + Some(cur_version) => { + if u16::from(version) > u16::from(cur_version) { + self.tls_version = Some(version); + } + } + } + } + + pub fn set_alpn(&mut self, alpn: &[u8]) { + if alpn.len() > 1 { + if alpn.len() == 2 { + // GREASE values are 2 
bytes, so this could be one -- check + let v: u16 = (alpn[0] as u16) << 8 | alpn[alpn.len() - 1] as u16; + if JA4::is_grease(v) { + return; + } + } + self.alpn[0] = char::from(alpn[0]); + self.alpn[1] = char::from(alpn[alpn.len() - 1]); + } + } + + pub fn add_cipher_suite(&mut self, cipher: TlsCipherSuiteID) { + if JA4::is_grease(u16::from(cipher)) { + return; + } + self.ciphersuites.push(cipher); + } + + pub fn add_extension(&mut self, ext: TlsExtensionType) { + if JA4::is_grease(u16::from(ext)) { + return; + } + if ext != TlsExtensionType::ApplicationLayerProtocolNegotiation + && ext != TlsExtensionType::ServerName + { + self.extensions.push(ext); + } else if ext == TlsExtensionType::ServerName { + self.domain = true; + } + self.nof_exts += 1; + } + + pub fn add_signature_algorithm(&mut self, sigalgo: u16) { + if JA4::is_grease(sigalgo) { + return; + } + self.signature_algorithms.push(sigalgo); + } + + pub fn get_hash(&self) -> String { + // Calculate JA4_a + let ja4_a = format!( + "{proto}{version}{sni}{nof_c:02}{nof_e:02}{al1}{al2}", + proto = if self.quic { "q" } else { "t" }, + version = JA4::version_to_ja4code(self.tls_version), + sni = if self.domain { "d" } else { "i" }, + nof_c = min(99, self.ciphersuites.len()), + nof_e = min(99, self.nof_exts), + al1 = self.alpn[0], + al2 = self.alpn[1] + ); + + // Calculate JA4_b + let mut sorted_ciphers = self.ciphersuites.to_vec(); + sorted_ciphers.sort_by(|a, b| u16::from(*a).cmp(&u16::from(*b))); + let sorted_cipherstrings: Vec = sorted_ciphers + .iter() + .map(|v| format!("{:04x}", u16::from(*v))) + .collect(); + let mut sha = Sha256::new(); + let ja4_b_raw = sorted_cipherstrings.join(","); + sha.update(&ja4_b_raw); + let mut ja4_b = format!("{:x}", sha.finalize_reset()); + ja4_b.truncate(12); + + // Calculate JA4_c + let mut sorted_exts = self.extensions.to_vec(); + sorted_exts.sort_by(|a, b| u16::from(*a).cmp(&u16::from(*b))); + let sorted_extstrings: Vec = sorted_exts + .iter() + .map(|v| format!("{:04x}", 
u16::from(*v))) + .collect(); + let ja4_c1_raw = sorted_extstrings.join(","); + let unsorted_sigalgostrings: Vec = self + .signature_algorithms + .iter() + .map(|v| format!("{:04x}", (*v))) + .collect(); + let ja4_c2_raw = unsorted_sigalgostrings.join(","); + let ja4_c_raw = format!("{}_{}", ja4_c1_raw, ja4_c2_raw); + sha.update(&ja4_c_raw); + let mut ja4_c = format!("{:x}", sha.finalize()); + ja4_c.truncate(12); + + return format!("{}_{}_{}", ja4_a, ja4_b, ja4_c); + } +} + +#[no_mangle] +pub extern "C" fn SCJA4New() -> *mut JA4 { + let j = Box::new(JA4::new()); + Box::into_raw(j) +} + +#[no_mangle] +pub unsafe extern "C" fn SCJA4SetTLSVersion(j: &mut JA4, version: u16) { + j.set_tls_version(TlsVersion(version)); +} + +#[no_mangle] +pub unsafe extern "C" fn SCJA4AddCipher(j: &mut JA4, cipher: u16) { + j.add_cipher_suite(TlsCipherSuiteID(cipher)); +} + +#[no_mangle] +pub unsafe extern "C" fn SCJA4AddExtension(j: &mut JA4, ext: u16) { + j.add_extension(TlsExtensionType(ext)); +} + +#[no_mangle] +pub unsafe extern "C" fn SCJA4AddSigAlgo(j: &mut JA4, sigalgo: u16) { + j.add_signature_algorithm(sigalgo); +} + +#[no_mangle] +pub unsafe extern "C" fn SCJA4SetALPN(j: &mut JA4, proto: *const c_char, len: u16) { + let b: &[u8] = std::slice::from_raw_parts(proto as *const c_uchar, len as usize); + j.set_alpn(b); +} + +#[no_mangle] +pub unsafe extern "C" fn SCJA4GetHash(j: &mut JA4, out: &mut [u8; 36]) { + let hash = j.get_hash(); + out[0..36].copy_from_slice(hash.as_bytes()); +} + +#[no_mangle] +pub unsafe extern "C" fn SCJA4Free(j: &mut JA4) { + let ja4: Box = Box::from_raw(j); + std::mem::drop(ja4); +} + +#[cfg(all(test, feature = "ja4"))] +mod tests { + use super::*; + + #[test] + fn test_is_grease() { + let mut alpn = "foobar".as_bytes(); + let mut len = alpn.len(); + let v: u16 = (alpn[0] as u16) << 8 | alpn[len - 1] as u16; + assert!(!JA4::is_grease(v)); + + alpn = &[0x0a, 0x0a]; + len = alpn.len(); + let v: u16 = (alpn[0] as u16) << 8 | alpn[len - 1] as u16; + 
assert!(JA4::is_grease(v)); + } + + #[test] + fn test_tlsversion_max() { + let mut j = JA4::new(); + assert_eq!(j.tls_version, None); + j.set_tls_version(TlsVersion::Ssl30); + assert_eq!(j.tls_version, Some(TlsVersion::Ssl30)); + j.set_tls_version(TlsVersion::Tls12); + assert_eq!(j.tls_version, Some(TlsVersion::Tls12)); + j.set_tls_version(TlsVersion::Tls10); + assert_eq!(j.tls_version, Some(TlsVersion::Tls12)); + } + + #[test] + fn test_get_hash_limit_numbers() { + // Test whether the limitation of the extension and ciphersuite + // count to 99 is reflected correctly. + let mut j = JA4::new(); + + for i in 1..200 { + j.add_cipher_suite(TlsCipherSuiteID(i)); + } + for i in 1..200 { + j.add_extension(TlsExtensionType(i)); + } + + let mut s = j.get_hash(); + s.truncate(10); + assert_eq!(s, "t00i999900"); + } + + #[test] + fn test_short_alpn() { + let mut j = JA4::new(); + + j.set_alpn("a".as_bytes()); + let mut s = j.get_hash(); + s.truncate(10); + assert_eq!(s, "t00i000000"); + + j.set_alpn("aa".as_bytes()); + let mut s = j.get_hash(); + s.truncate(10); + assert_eq!(s, "t00i0000aa"); + } + + #[test] + fn test_get_hash() { + let mut j = JA4::new(); + + // the empty JA4 hash + let s = j.get_hash(); + assert_eq!(s, "t00i000000_e3b0c44298fc_d2e2adf7177b"); + + // set TLS version + j.set_tls_version(TlsVersion::Tls12); + let s = j.get_hash(); + assert_eq!(s, "t12i000000_e3b0c44298fc_d2e2adf7177b"); + + // set QUIC + j.set_quic(); + let s = j.get_hash(); + assert_eq!(s, "q12i000000_e3b0c44298fc_d2e2adf7177b"); + + // set GREASE extension, should be ignored + j.add_extension(TlsExtensionType(0x0a0a)); + let s = j.get_hash(); + assert_eq!(s, "q12i000000_e3b0c44298fc_d2e2adf7177b"); + + // set SNI extension, should only increase count and change i->d + j.add_extension(TlsExtensionType(0x0000)); + let s = j.get_hash(); + assert_eq!(s, "q12d000100_e3b0c44298fc_d2e2adf7177b"); + + // set ALPN extension, should only increase count and set end of JA4_a + j.set_alpn(b"h3-16"); + 
j.add_extension(TlsExtensionType::ApplicationLayerProtocolNegotiation); + let s = j.get_hash(); + assert_eq!(s, "q12d0002h6_e3b0c44298fc_d2e2adf7177b"); + + // set some ciphers + j.add_cipher_suite(TlsCipherSuiteID(0x1111)); + j.add_cipher_suite(TlsCipherSuiteID(0x0a20)); + j.add_cipher_suite(TlsCipherSuiteID(0xbada)); + let s = j.get_hash(); + assert_eq!(s, "q12d0302h6_f500716053f9_d2e2adf7177b"); + + // set some extensions and signature algorithms + j.add_extension(TlsExtensionType(0xface)); + j.add_extension(TlsExtensionType(0x0121)); + j.add_extension(TlsExtensionType(0x1234)); + j.add_signature_algorithm(0x6666); + let s = j.get_hash(); + assert_eq!(s, "q12d0305h6_f500716053f9_2debc8880bae"); + } +} diff --git a/rust/src/jsonbuilder.rs b/rust/src/jsonbuilder.rs index 82be09953c70..8b4296663689 100644 --- a/rust/src/jsonbuilder.rs +++ b/rust/src/jsonbuilder.rs @@ -1,4 +1,4 @@ -/* Copyright (C) 2020 Open Information Security Foundation +/* Copyright (C) 2020-2024 Open Information Security Foundation * * You can copy, redistribute or modify this Program under the terms of * the GNU General Public License version 2 as published by the Free @@ -435,7 +435,7 @@ impl JsonBuilder { return Err(JsonError::InvalidState); } } - self.push_str(&val.to_string())?; + self.push_float(val)?; Ok(self) } @@ -527,6 +527,22 @@ impl JsonBuilder { } } + /// Set a key and a string value (from bytes) on an object, with a limited size + pub fn set_string_from_bytes_limited(&mut self, key: &str, val: &[u8], limit: usize) -> Result<&mut Self, JsonError> { + let mut valtrunc = Vec::new(); + let val = if val.len() > limit { + valtrunc.extend_from_slice(&val[..limit]); + valtrunc.extend_from_slice(b"[truncated]"); + &valtrunc + } else { + val + }; + match std::str::from_utf8(val) { + Ok(s) => self.set_string(key, s), + Err(_) => self.set_string(key, &try_string_from_bytes(val)?), + } + } + /// Set a key and a string field as the base64 encoded string of the value. 
pub fn set_base64(&mut self, key: &str, val: &[u8]) -> Result<&mut Self, JsonError> { match self.current_state() { @@ -597,7 +613,8 @@ impl JsonBuilder { Ok(self) } - pub fn set_float(&mut self, key: &str, val: f64) -> Result<&mut Self, JsonError> { + /// Set a key and a signed integer type on an object. + pub fn set_int(&mut self, key: &str, val: i64) -> Result<&mut Self, JsonError> { match self.current_state() { State::ObjectNth => { self.push(',')?; @@ -617,6 +634,26 @@ impl JsonBuilder { Ok(self) } + pub fn set_float(&mut self, key: &str, val: f64) -> Result<&mut Self, JsonError> { + match self.current_state() { + State::ObjectNth => { + self.push(',')?; + } + State::ObjectFirst => { + self.set_state(State::ObjectNth); + } + _ => { + debug_validate_fail!("invalid state"); + return Err(JsonError::InvalidState); + } + } + self.push('"')?; + self.push_str(key)?; + self.push_str("\":")?; + self.push_float(val)?; + Ok(self) + } + pub fn set_bool(&mut self, key: &str, val: bool) -> Result<&mut Self, JsonError> { match self.current_state() { State::ObjectNth => { @@ -644,6 +681,15 @@ impl JsonBuilder { self.buf.capacity() } + fn push_float(&mut self, val: f64) -> Result<(), JsonError> { + if val.is_nan() || val.is_infinite() { + self.push_str("null")?; + } else { + self.push_str(&val.to_string())?; + } + Ok(()) + } + /// Encode a string into the buffer, escaping as needed. 
/// /// The string is encoded into an intermediate vector as its faster @@ -940,6 +986,14 @@ pub unsafe extern "C" fn jb_set_uint(js: &mut JsonBuilder, key: *const c_char, v return false; } +#[no_mangle] +pub unsafe extern "C" fn jb_set_int(js: &mut JsonBuilder, key: *const c_char, val: i64) -> bool { + if let Ok(key) = CStr::from_ptr(key).to_str() { + return js.set_int(key, val).is_ok(); + } + return false; +} + #[no_mangle] pub unsafe extern "C" fn jb_set_float(js: &mut JsonBuilder, key: *const c_char, val: f64) -> bool { if let Ok(key) = CStr::from_ptr(key).to_str() { @@ -1294,6 +1348,48 @@ mod test { jb.close().unwrap(); assert_eq!(jb.buf, r#"[1.1,2.2]"#); } + + #[test] + fn test_set_nan() { + let mut jb = JsonBuilder::try_new_object().unwrap(); + jb.set_float("nan", f64::NAN).unwrap(); + jb.close().unwrap(); + assert_eq!(jb.buf, r#"{"nan":null}"#); + } + + #[test] + fn test_append_nan() { + let mut jb = JsonBuilder::try_new_array().unwrap(); + jb.append_float(f64::NAN).unwrap(); + jb.close().unwrap(); + assert_eq!(jb.buf, r#"[null]"#); + } + + #[test] + fn test_set_inf() { + let mut jb = JsonBuilder::try_new_object().unwrap(); + jb.set_float("inf", f64::INFINITY).unwrap(); + jb.close().unwrap(); + assert_eq!(jb.buf, r#"{"inf":null}"#); + + let mut jb = JsonBuilder::try_new_object().unwrap(); + jb.set_float("inf", f64::NEG_INFINITY).unwrap(); + jb.close().unwrap(); + assert_eq!(jb.buf, r#"{"inf":null}"#); + } + + #[test] + fn test_append_inf() { + let mut jb = JsonBuilder::try_new_array().unwrap(); + jb.append_float(f64::INFINITY).unwrap(); + jb.close().unwrap(); + assert_eq!(jb.buf, r#"[null]"#); + + let mut jb = JsonBuilder::try_new_array().unwrap(); + jb.append_float(f64::NEG_INFINITY).unwrap(); + jb.close().unwrap(); + assert_eq!(jb.buf, r#"[null]"#); + } } // Escape table as seen in serde-json (MIT/Apache license) diff --git a/rust/src/krb/krb5.rs b/rust/src/krb/krb5.rs index 3282d50ba78b..9ff0ff2157d9 100644 --- a/rust/src/krb/krb5.rs +++ 
b/rust/src/krb/krb5.rs @@ -363,6 +363,9 @@ pub unsafe extern "C" fn rs_krb5_probing_parser(_flow: *const Flow, input:*const u8, input_len: u32, _rdir: *mut u8) -> AppProto { + if input.is_null() { + return ALPROTO_UNKNOWN; + } let slice = build_slice!(input,input_len as usize); let alproto = ALPROTO_KRB5; if slice.len() <= 10 { return ALPROTO_FAILED; } @@ -402,6 +405,9 @@ pub unsafe extern "C" fn rs_krb5_probing_parser_tcp(_flow: *const Flow, input:*const u8, input_len: u32, rdir: *mut u8) -> AppProto { + if input.is_null() { + return ALPROTO_UNKNOWN; + } let slice = build_slice!(input,input_len as usize); if slice.len() <= 14 { return ALPROTO_FAILED; } match be_u32(slice) as IResult<&[u8],u32> { diff --git a/rust/src/lib.rs b/rust/src/lib.rs index da2859637783..9e58d4d805e8 100644 --- a/rust/src/lib.rs +++ b/rust/src/lib.rs @@ -27,6 +27,9 @@ #![allow(clippy::let_and_return)] #![allow(clippy::uninlined_format_args)] +// We find this is beyond what the linter should flag. +#![allow(clippy::items_after_test_module)] + // We find this makes sense at time. 
#![allow(clippy::module_inception)] @@ -81,6 +84,8 @@ pub mod filetracker; pub mod kerberos; pub mod detect; +pub mod ja4; + #[cfg(feature = "lua")] pub mod lua; @@ -116,3 +121,4 @@ pub mod plugin; pub mod lzma; pub mod util; pub mod ffi; +pub mod feature; diff --git a/rust/src/log.rs b/rust/src/log.rs index 744169a97039..7bf0be8a97c6 100644 --- a/rust/src/log.rs +++ b/rust/src/log.rs @@ -29,7 +29,7 @@ pub enum Level { NotSet = -1, _None = 0, Error, - _Warning, + Warning, Notice, Info, _Perf, @@ -115,6 +115,13 @@ macro_rules!SCLogError { }; } +#[macro_export] +macro_rules!SCLogWarning { + ($($arg:tt)*) => { + $crate::do_log!($crate::log::Level::Warning, $($arg)*); + }; +} + #[macro_export] macro_rules!SCLogNotice { ($($arg:tt)*) => { diff --git a/rust/src/modbus/detect.rs b/rust/src/modbus/detect.rs index fdd5d51db9f5..4dc14d03783e 100644 --- a/rust/src/modbus/detect.rs +++ b/rust/src/modbus/detect.rs @@ -63,9 +63,9 @@ pub struct DetectModbusRust { fn check_match_range(sig_range: &Range, trans_range: RangeInclusive) -> bool { if sig_range.start == sig_range.end { sig_range.start >= *trans_range.start() && sig_range.start <= *trans_range.end() - } else if sig_range.start == std::u16::MIN { + } else if sig_range.start == u16::MIN { sig_range.end > *trans_range.start() - } else if sig_range.end == std::u16::MAX { + } else if sig_range.end == u16::MAX { sig_range.start < *trans_range.end() } else { sig_range.start < *trans_range.end() && *trans_range.start() < sig_range.end @@ -78,9 +78,9 @@ fn check_match_range(sig_range: &Range, trans_range: RangeInclusive) - fn check_match(sig_range: &Range, value: u16) -> bool { if sig_range.start == sig_range.end { sig_range.start == value - } else if sig_range.start == std::u16::MIN { + } else if sig_range.start == u16::MIN { sig_range.end > value - } else if sig_range.end == std::u16::MAX { + } else if sig_range.end == u16::MAX { sig_range.start < value } else { sig_range.start < value && value < sig_range.end @@ -90,8 +90,8 @@ 
fn check_match(sig_range: &Range, value: u16) -> bool { /// Gets the min/max range of an alert signature from the respective capture groups. /// In the case where the max is not given, it is set based on the first char of the min str /// which indicates what range we are looking for: -/// '<' = std::u16::MIN..min -/// '>' = min..std::u16::MAX +/// '<' = u16::MIN..min +/// '>' = min..u16::MAX /// _ = min..min /// If the max is given, the range returned is min..max fn parse_range(min_str: &str, max_str: &str) -> Result, ()> { @@ -100,8 +100,8 @@ fn parse_range(min_str: &str, max_str: &str) -> Result, ()> { debug_validate_bug_on!(!sign.is_ascii_digit() && sign != '<' && sign != '>'); match min_str[!sign.is_ascii_digit() as usize..].parse::() { Ok(num) => match sign { - '>' => Ok(num..std::u16::MAX), - '<' => Ok(std::u16::MIN..num), + '>' => Ok(num..u16::MAX), + '<' => Ok(u16::MIN..num), _ => Ok(num..num), }, Err(_) => { @@ -524,7 +524,7 @@ mod test { parse_access("access write coils, address <500"), Ok(DetectModbusRust { access_type: Some(AccessType::WRITE | AccessType::COILS), - address: Some(std::u16::MIN..500), + address: Some(u16::MIN..500), ..Default::default() }) ); @@ -532,7 +532,7 @@ mod test { parse_access("access write coils, address >500"), Ok(DetectModbusRust { access_type: Some(AccessType::WRITE | AccessType::COILS), - address: Some(500..std::u16::MAX), + address: Some(500..u16::MAX), ..Default::default() }) ); @@ -541,7 +541,7 @@ mod test { Ok(DetectModbusRust { access_type: Some(AccessType::WRITE | AccessType::HOLDING), address: Some(100..100), - value: Some(std::u16::MIN..1000), + value: Some(u16::MIN..1000), ..Default::default() }) ); @@ -583,7 +583,7 @@ mod test { assert_eq!( parse_unit_id("unit <11"), Ok(DetectModbusRust { - unit_id: Some(std::u16::MIN..11), + unit_id: Some(u16::MIN..11), ..Default::default() }) ); @@ -649,7 +649,7 @@ mod test { &DetectModbusRust { access_type: Some(AccessType::WRITE | AccessType::HOLDING), address: Some(15..15), - 
value: Some(std::u16::MIN..4660), + value: Some(u16::MIN..4660), ..Default::default() } ), @@ -701,7 +701,7 @@ mod test { &DetectModbusRust { access_type: Some(AccessType::WRITE | AccessType::HOLDING), address: Some(15..15), - value: Some(4660..std::u16::MAX), + value: Some(4660..u16::MAX), ..Default::default() } ), @@ -714,7 +714,7 @@ mod test { &DetectModbusRust { access_type: Some(AccessType::WRITE | AccessType::HOLDING), address: Some(16..16), - value: Some(std::u16::MIN..22137), + value: Some(u16::MIN..22137), ..Default::default() } ), @@ -727,7 +727,7 @@ mod test { &DetectModbusRust { access_type: Some(AccessType::WRITE | AccessType::HOLDING), address: Some(16..16), - value: Some(std::u16::MIN..22137), + value: Some(u16::MIN..22137), ..Default::default() } ), @@ -779,7 +779,7 @@ mod test { &DetectModbusRust { access_type: Some(AccessType::WRITE | AccessType::HOLDING), address: Some(17..17), - value: Some(39611..std::u16::MAX), + value: Some(39611..u16::MAX), ..Default::default() } ), @@ -823,7 +823,7 @@ mod test { rs_modbus_inspect( &modbus.transactions[0], &DetectModbusRust { - unit_id: Some(11..std::u16::MAX), + unit_id: Some(11..u16::MAX), ..Default::default() } ), @@ -834,7 +834,7 @@ mod test { rs_modbus_inspect( &modbus.transactions[0], &DetectModbusRust { - unit_id: Some(std::u16::MIN..9), + unit_id: Some(u16::MIN..9), ..Default::default() } ), @@ -867,7 +867,7 @@ mod test { rs_modbus_inspect( &modbus.transactions[0], &DetectModbusRust { - unit_id: Some(9..std::u16::MAX), + unit_id: Some(9..u16::MAX), ..Default::default() } ), @@ -878,7 +878,7 @@ mod test { rs_modbus_inspect( &modbus.transactions[0], &DetectModbusRust { - unit_id: Some(std::u16::MIN..11), + unit_id: Some(u16::MIN..11), ..Default::default() } ), @@ -1206,7 +1206,7 @@ mod test { &modbus.transactions[5], &DetectModbusRust { access_type: Some(AccessType::READ | AccessType::INPUT), - address: Some(std::u16::MIN..9), + address: Some(u16::MIN..9), ..Default::default() } ), @@ -1230,7 +1230,7 
@@ mod test { &modbus.transactions[5], &DetectModbusRust { access_type: Some(AccessType::READ | AccessType::INPUT), - address: Some(104..std::u16::MAX), + address: Some(104..u16::MAX), ..Default::default() } ), @@ -1266,7 +1266,7 @@ mod test { &modbus.transactions[5], &DetectModbusRust { access_type: Some(AccessType::READ | AccessType::INPUT), - address: Some(std::u16::MIN..10), + address: Some(u16::MIN..10), ..Default::default() } ), @@ -1290,7 +1290,7 @@ mod test { &modbus.transactions[5], &DetectModbusRust { access_type: Some(AccessType::READ | AccessType::INPUT), - address: Some(103..std::u16::MAX), + address: Some(103..u16::MAX), ..Default::default() } ), diff --git a/rust/src/modbus/modbus.rs b/rust/src/modbus/modbus.rs index 246e9cae6d6c..b09c26b11894 100644 --- a/rust/src/modbus/modbus.rs +++ b/rust/src/modbus/modbus.rs @@ -189,7 +189,7 @@ impl ModbusState { None => { let mut tx = match self.new_tx() { Some(tx) => tx, - None => return AppLayerResult::ok(), + None => return AppLayerResult::err(), }; tx.set_events_from_flags(&msg.error_flags); tx.request = Some(msg); @@ -215,7 +215,7 @@ impl ModbusState { None => { let mut tx = match self.new_tx() { Some(tx) => tx, - None => return AppLayerResult::ok(), + None => return AppLayerResult::err(), }; if msg .access_type @@ -274,6 +274,9 @@ impl ModbusState { pub extern "C" fn rs_modbus_probe( _flow: *const core::Flow, _direction: u8, input: *const u8, len: u32, _rdir: *mut u8, ) -> AppProto { + if input.is_null() { + return ALPROTO_UNKNOWN; + } let slice: &[u8] = unsafe { std::slice::from_raw_parts(input as *mut u8, len as usize) }; match MODBUS_PARSER.probe(slice, Direction::Unknown) { Status::Recognized => unsafe { ALPROTO_MODBUS }, diff --git a/rust/src/mqtt/mqtt.rs b/rust/src/mqtt/mqtt.rs index f1c37d83c881..b31b6eacea5d 100644 --- a/rust/src/mqtt/mqtt.rs +++ b/rust/src/mqtt/mqtt.rs @@ -33,7 +33,7 @@ use std::ffi::CString; // packet in a connection. 
Note that there is no risk of collision with a // parsed packet identifier because in the protocol these are only 16 bit // unsigned. -const MQTT_CONNECT_PKT_ID: u32 = std::u32::MAX; +const MQTT_CONNECT_PKT_ID: u32 = u32::MAX; // Maximum message length in bytes. If the length of a message exceeds // this value, it will be truncated. Default: 1MB. static mut MAX_MSG_LEN: u32 = 1048576; @@ -183,11 +183,11 @@ impl MQTTState { } fn new_tx(&mut self, msg: MQTTMessage, toclient: bool) -> MQTTTransaction { - let direction = if toclient { - Direction::ToClient - } else { - Direction::ToServer - }; + let direction = if toclient { + Direction::ToClient + } else { + Direction::ToServer + }; let mut tx = MQTTTransaction::new(msg, direction); self.tx_id += 1; tx.tx_id = self.tx_id; @@ -217,104 +217,82 @@ impl MQTTState { match msg.op { MQTTOperation::CONNECT(ref conn) => { self.protocol_version = conn.protocol_version; + let mut tx = self.new_tx(msg, toclient); + tx.pkt_id = Some(MQTT_CONNECT_PKT_ID); if self.connected { - let mut tx = self.new_tx(msg, toclient); MQTTState::set_event(&mut tx, MQTTEvent::DoubleConnect); - self.transactions.push_back(tx); - } else { - let mut tx = self.new_tx(msg, toclient); - tx.pkt_id = Some(MQTT_CONNECT_PKT_ID); - self.transactions.push_back(tx); } + self.transactions.push_back(tx); } MQTTOperation::PUBLISH(ref publish) => { - if !self.connected { - let mut tx = self.new_tx(msg, toclient); - MQTTState::set_event(&mut tx, MQTTEvent::UnintroducedMessage); - self.transactions.push_back(tx); - return; - } - match msg.header.qos_level { + let qos = msg.header.qos_level; + let pkt_id = publish.message_id; + let mut tx = self.new_tx(msg, toclient); + match qos { 0 => { // with QOS level 0, we do not need to wait for a // response - let mut tx = self.new_tx(msg, toclient); tx.complete = true; - self.transactions.push_back(tx); } 1..=2 => { - if let Some(pkt_id) = publish.message_id { - let mut tx = self.new_tx(msg, toclient); + if let Some(pkt_id) = 
pkt_id { tx.pkt_id = Some(pkt_id as u32); - self.transactions.push_back(tx); } else { - let mut tx = self.new_tx(msg, toclient); MQTTState::set_event(&mut tx, MQTTEvent::MissingMsgId); - self.transactions.push_back(tx); } } _ => { - let mut tx = self.new_tx(msg, toclient); MQTTState::set_event(&mut tx, MQTTEvent::InvalidQosLevel); - self.transactions.push_back(tx); } } - } - MQTTOperation::SUBSCRIBE(ref subscribe) => { if !self.connected { - let mut tx = self.new_tx(msg, toclient); MQTTState::set_event(&mut tx, MQTTEvent::UnintroducedMessage); - self.transactions.push_back(tx); - return; } + self.transactions.push_back(tx); + } + MQTTOperation::SUBSCRIBE(ref subscribe) => { let pkt_id = subscribe.message_id as u32; - match msg.header.qos_level { + let qos = msg.header.qos_level; + let mut tx = self.new_tx(msg, toclient); + match qos { 0 => { // with QOS level 0, we do not need to wait for a // response - let mut tx = self.new_tx(msg, toclient); tx.complete = true; - self.transactions.push_back(tx); } 1..=2 => { - let mut tx = self.new_tx(msg, toclient); tx.pkt_id = Some(pkt_id); - self.transactions.push_back(tx); } _ => { - let mut tx = self.new_tx(msg, toclient); MQTTState::set_event(&mut tx, MQTTEvent::InvalidQosLevel); - self.transactions.push_back(tx); } } - } - MQTTOperation::UNSUBSCRIBE(ref unsubscribe) => { if !self.connected { - let mut tx = self.new_tx(msg, toclient); MQTTState::set_event(&mut tx, MQTTEvent::UnintroducedMessage); - self.transactions.push_back(tx); - return; } + self.transactions.push_back(tx); + } + MQTTOperation::UNSUBSCRIBE(ref unsubscribe) => { let pkt_id = unsubscribe.message_id as u32; - match msg.header.qos_level { + let qos = msg.header.qos_level; + let mut tx = self.new_tx(msg, toclient); + match qos { 0 => { // with QOS level 0, we do not need to wait for a // response - let mut tx = self.new_tx(msg, toclient); tx.complete = true; - self.transactions.push_back(tx); } 1..=2 => { - let mut tx = self.new_tx(msg, toclient); tx.pkt_id 
= Some(pkt_id); - self.transactions.push_back(tx); } _ => { - let mut tx = self.new_tx(msg, toclient); MQTTState::set_event(&mut tx, MQTTEvent::InvalidQosLevel); - self.transactions.push_back(tx); } } + if !self.connected { + MQTTState::set_event(&mut tx, MQTTEvent::UnintroducedMessage); + } + self.transactions.push_back(tx); } MQTTOperation::CONNACK(ref _connack) => { if let Some(tx) = self.get_tx_by_pkt_id(MQTT_CONNECT_PKT_ID) { @@ -325,31 +303,24 @@ impl MQTTState { } else { let mut tx = self.new_tx(msg, toclient); MQTTState::set_event(&mut tx, MQTTEvent::MissingConnect); + tx.complete = true; self.transactions.push_back(tx); } } MQTTOperation::PUBREC(ref v) | MQTTOperation::PUBREL(ref v) => { - if !self.connected { - let mut tx = self.new_tx(msg, toclient); - MQTTState::set_event(&mut tx, MQTTEvent::UnintroducedMessage); - self.transactions.push_back(tx); - return; - } if let Some(tx) = self.get_tx_by_pkt_id(v.message_id as u32) { tx.msg.push(msg); } else { let mut tx = self.new_tx(msg, toclient); MQTTState::set_event(&mut tx, MQTTEvent::MissingPublish); + if !self.connected { + MQTTState::set_event(&mut tx, MQTTEvent::UnintroducedMessage); + } + tx.complete = true; self.transactions.push_back(tx); } } MQTTOperation::PUBACK(ref v) | MQTTOperation::PUBCOMP(ref v) => { - if !self.connected { - let mut tx = self.new_tx(msg, toclient); - MQTTState::set_event(&mut tx, MQTTEvent::UnintroducedMessage); - self.transactions.push_back(tx); - return; - } if let Some(tx) = self.get_tx_by_pkt_id(v.message_id as u32) { tx.msg.push(msg); tx.complete = true; @@ -357,16 +328,14 @@ impl MQTTState { } else { let mut tx = self.new_tx(msg, toclient); MQTTState::set_event(&mut tx, MQTTEvent::MissingPublish); + if !self.connected { + MQTTState::set_event(&mut tx, MQTTEvent::UnintroducedMessage); + } + tx.complete = true; self.transactions.push_back(tx); } } MQTTOperation::SUBACK(ref suback) => { - if !self.connected { - let mut tx = self.new_tx(msg, toclient); - 
MQTTState::set_event(&mut tx, MQTTEvent::UnintroducedMessage); - self.transactions.push_back(tx); - return; - } if let Some(tx) = self.get_tx_by_pkt_id(suback.message_id as u32) { tx.msg.push(msg); tx.complete = true; @@ -374,16 +343,14 @@ impl MQTTState { } else { let mut tx = self.new_tx(msg, toclient); MQTTState::set_event(&mut tx, MQTTEvent::MissingSubscribe); + if !self.connected { + MQTTState::set_event(&mut tx, MQTTEvent::UnintroducedMessage); + } + tx.complete = true; self.transactions.push_back(tx); } } MQTTOperation::UNSUBACK(ref unsuback) => { - if !self.connected { - let mut tx = self.new_tx(msg, toclient); - MQTTState::set_event(&mut tx, MQTTEvent::UnintroducedMessage); - self.transactions.push_back(tx); - return; - } if let Some(tx) = self.get_tx_by_pkt_id(unsuback.message_id as u32) { tx.msg.push(msg); tx.complete = true; @@ -391,6 +358,10 @@ impl MQTTState { } else { let mut tx = self.new_tx(msg, toclient); MQTTState::set_event(&mut tx, MQTTEvent::MissingUnsubscribe); + if !self.connected { + MQTTState::set_event(&mut tx, MQTTEvent::UnintroducedMessage); + } + tx.complete = true; self.transactions.push_back(tx); } } @@ -406,25 +377,19 @@ impl MQTTState { self.transactions.push_back(tx); } MQTTOperation::AUTH(_) | MQTTOperation::DISCONNECT(_) => { + let mut tx = self.new_tx(msg, toclient); + tx.complete = true; if !self.connected { - let mut tx = self.new_tx(msg, toclient); MQTTState::set_event(&mut tx, MQTTEvent::UnintroducedMessage); - self.transactions.push_back(tx); - return; } - let mut tx = self.new_tx(msg, toclient); - tx.complete = true; self.transactions.push_back(tx); } MQTTOperation::PINGREQ | MQTTOperation::PINGRESP => { + let mut tx = self.new_tx(msg, toclient); + tx.complete = true; if !self.connected { - let mut tx = self.new_tx(msg, toclient); MQTTState::set_event(&mut tx, MQTTEvent::UnintroducedMessage); - self.transactions.push_back(tx); - return; } - let mut tx = self.new_tx(msg, toclient); - tx.complete = true; 
self.transactions.push_back(tx); } } @@ -468,8 +433,8 @@ impl MQTTState { let _pdu = Frame::new( flow, &stream_slice, - input, - current.len() as i64, + current, + (current.len() - rem.len()) as i64, MQTTFrameType::Pdu as u8, ); SCLogDebug!("request msg {:?}", msg); @@ -553,8 +518,8 @@ impl MQTTState { let _pdu = Frame::new( flow, &stream_slice, - input, - input.len() as i64, + current, + (current.len() - rem.len()) as i64, MQTTFrameType::Pdu as u8, ); @@ -608,7 +573,11 @@ impl MQTTState { } fn set_event_notx(&mut self, event: MQTTEvent, toclient: bool) { - let mut tx = MQTTTransaction::new_empty(if toclient { Direction::ToClient } else { Direction::ToServer }); + let mut tx = MQTTTransaction::new_empty(if toclient { + Direction::ToClient + } else { + Direction::ToServer + }); self.tx_id += 1; tx.tx_id = self.tx_id; if toclient { @@ -647,6 +616,9 @@ impl MQTTState { pub unsafe extern "C" fn rs_mqtt_probing_parser( _flow: *const Flow, _direction: u8, input: *const u8, input_len: u32, _rdir: *mut u8, ) -> AppProto { + if input.is_null() { + return ALPROTO_UNKNOWN; + } let buf = build_slice!(input, input_len as usize); match parse_fixed_header(buf) { Ok((_, hdr)) => { @@ -780,8 +752,7 @@ export_state_data_get!(rs_mqtt_get_state_data, MQTTState); #[no_mangle] pub unsafe extern "C" fn rs_mqtt_register_parser(cfg_max_msg_len: u32) { let default_port = CString::new("[1883]").unwrap(); - let max_msg_len = &mut MAX_MSG_LEN; - *max_msg_len = cfg_max_msg_len; + MAX_MSG_LEN = cfg_max_msg_len; let parser = RustParser { name: PARSER_NAME.as_ptr() as *const std::os::raw::c_char, default_port: default_port.as_ptr(), diff --git a/rust/src/nfs/nfs.rs b/rust/src/nfs/nfs.rs index dfb5e0e72445..b472689a5104 100644 --- a/rust/src/nfs/nfs.rs +++ b/rust/src/nfs/nfs.rs @@ -497,7 +497,7 @@ impl NFSState { } // TODO maybe not enough users to justify a func - pub fn mark_response_tx_done(&mut self, xid: u32, rpc_status: u32, nfs_status: u32, resp_handle: &Vec) + pub fn 
mark_response_tx_done(&mut self, xid: u32, rpc_status: u32, nfs_status: u32, resp_handle: &[u8]) { if let Some(mytx) = self.get_tx_by_xid(xid) { mytx.response_done = true; @@ -1881,6 +1881,9 @@ pub unsafe extern "C" fn rs_nfs_probe_ms( direction: u8, input: *const u8, len: u32, rdir: *mut u8) -> AppProto { + if input.is_null() { + return ALPROTO_UNKNOWN; + } let slice: &[u8] = build_slice!(input, len as usize); SCLogDebug!("rs_nfs_probe_ms: probing direction {:02x}", direction); let mut adirection : u8 = 0; @@ -1920,6 +1923,9 @@ pub unsafe extern "C" fn rs_nfs_probe(_f: *const Flow, _rdir: *mut u8) -> AppProto { + if input.is_null() { + return ALPROTO_UNKNOWN; + } let slice: &[u8] = build_slice!(input, len as usize); SCLogDebug!("rs_nfs_probe: running probe"); match nfs_probe(slice, direction.into()) { @@ -1938,6 +1944,9 @@ pub unsafe extern "C" fn rs_nfs_probe_udp_ts(_f: *const Flow, _rdir: *mut u8) -> AppProto { + if input.is_null() { + return ALPROTO_UNKNOWN; + } let slice: &[u8] = build_slice!(input, len as usize); match nfs_probe_udp(slice, Direction::ToServer) { 1 => { ALPROTO_NFS }, @@ -1955,6 +1964,9 @@ pub unsafe extern "C" fn rs_nfs_probe_udp_tc(_f: *const Flow, _rdir: *mut u8) -> AppProto { + if input.is_null() { + return ALPROTO_UNKNOWN; + } let slice: &[u8] = build_slice!(input, len as usize); match nfs_probe_udp(slice, Direction::ToClient) { 1 => { ALPROTO_NFS }, diff --git a/rust/src/ntp/ntp.rs b/rust/src/ntp/ntp.rs index c7b3b3d01826..3a06cfb8b14a 100644 --- a/rust/src/ntp/ntp.rs +++ b/rust/src/ntp/ntp.rs @@ -245,6 +245,9 @@ pub extern "C" fn ntp_probing_parser(_flow: *const Flow, input:*const u8, input_len: u32, _rdir: *mut u8) -> AppProto { + if input.is_null() { + return ALPROTO_UNKNOWN; + } let slice: &[u8] = unsafe { std::slice::from_raw_parts(input as *mut u8, input_len as usize) }; let alproto = unsafe{ ALPROTO_NTP }; match parse_ntp(slice) { diff --git a/rust/src/pgsql/logger.rs b/rust/src/pgsql/logger.rs index 03b1ad0f4677..934b549a1671 
100644 --- a/rust/src/pgsql/logger.rs +++ b/rust/src/pgsql/logger.rs @@ -78,8 +78,6 @@ fn log_request(req: &PgsqlFEMessage, flags: u32) -> Result { if flags & PGSQL_LOG_PASSWORDS != 0 { js.set_string_from_bytes("password", payload)?; - } else { - js.set_string(req.to_str(), "password log disabled")?; } } PgsqlFEMessage::SASLResponse(RegularPacket { @@ -96,12 +94,27 @@ fn log_request(req: &PgsqlFEMessage, flags: u32) -> Result { js.set_string_from_bytes(req.to_str(), payload)?; } + PgsqlFEMessage::CancelRequest(CancelRequestMessage { + pid, + backend_key, + }) => { + js.set_string("message", "cancel_request")?; + js.set_uint("process_id", (*pid).into())?; + js.set_uint("secret_key", (*backend_key).into())?; + } PgsqlFEMessage::Terminate(TerminationMessage { identifier: _, length: _, }) => { js.set_string("message", req.to_str())?; } + PgsqlFEMessage::UnknownMessageType(RegularPacket { + identifier: _, + length: _, + payload: _, + }) => { + // We don't want to log these, for now. Cf redmine: #6576 + } } js.close()?; Ok(js) @@ -177,12 +190,10 @@ fn log_response(res: &PgsqlBEMessage, jb: &mut JsonBuilder) -> Result<(), JsonEr } PgsqlBEMessage::UnknownMessageType(RegularPacket { identifier: _, - length, - payload, + length: _, + payload: _, }) => { - // jb.set_string_from_bytes("identifier", identifier.to_vec())?; - jb.set_uint("length", (*length).into())?; - jb.set_string_from_bytes("payload", payload)?; + // We don't want to log these, for now. 
Cf redmine: #6576 } PgsqlBEMessage::AuthenticationOk(_) | PgsqlBEMessage::AuthenticationCleartextPassword(_) @@ -223,11 +234,10 @@ fn log_response(res: &PgsqlBEMessage, jb: &mut JsonBuilder) -> Result<(), JsonEr } PgsqlBEMessage::ConsolidatedDataRow(ConsolidatedDataRowPacket { identifier: _, - length: _, row_cnt, data_size, }) => { - jb.set_uint("data_rows", (*row_cnt).into())?; + jb.set_uint("data_rows", *row_cnt)?; jb.set_uint("data_size", *data_size)?; } PgsqlBEMessage::NotificationResponse(NotificationResponse { diff --git a/rust/src/pgsql/parser.rs b/rust/src/pgsql/parser.rs index ae07d5d5a078..502d3529fc03 100644 --- a/rust/src/pgsql/parser.rs +++ b/rust/src/pgsql/parser.rs @@ -23,7 +23,7 @@ use crate::common::nom7::take_until_and_consume; use nom7::branch::alt; use nom7::bytes::streaming::{tag, take, take_until, take_until1}; use nom7::character::streaming::{alphanumeric1, char}; -use nom7::combinator::{all_consuming, cond, eof, map_parser, opt, peek, rest, verify}; +use nom7::combinator::{all_consuming, cond, eof, map_parser, opt, peek, verify}; use nom7::error::{make_error, ErrorKind}; use nom7::multi::{many1, many_m_n, many_till}; use nom7::number::streaming::{be_i16, be_i32}; @@ -34,9 +34,14 @@ use nom7::{Err, IResult}; pub const PGSQL_LENGTH_FIELD: u32 = 4; pub const PGSQL_DUMMY_PROTO_MAJOR: u16 = 1234; // 0x04d2 +pub const PGSQL_DUMMY_PROTO_CANCEL_REQUEST: u16 = 5678; // 0x162e pub const PGSQL_DUMMY_PROTO_MINOR_SSL: u16 = 5679; //0x162f pub const _PGSQL_DUMMY_PROTO_MINOR_GSSAPI: u16 = 5680; // 0x1630 +fn parse_length(i: &[u8]) -> IResult<&[u8], u32> { + verify(be_u32, |&x| x >= PGSQL_LENGTH_FIELD)(i) +} + #[derive(Debug, PartialEq, Eq)] pub enum PgsqlParameters { // startup parameters @@ -205,8 +210,7 @@ pub struct BackendKeyDataMessage { #[derive(Debug, PartialEq, Eq)] pub struct ConsolidatedDataRowPacket { pub identifier: u8, - pub length: u32, - pub row_cnt: u16, + pub row_cnt: u64, pub data_size: u64, } @@ -311,6 +315,12 @@ pub struct 
TerminationMessage { pub length: u32, } +#[derive(Debug, PartialEq, Eq)] +pub struct CancelRequestMessage { + pub pid: u32, + pub backend_key: u32, +} + #[derive(Debug, PartialEq, Eq)] pub enum PgsqlFEMessage { SSLRequest(DummyStartupPacket), @@ -319,7 +329,9 @@ pub enum PgsqlFEMessage { SASLInitialResponse(SASLInitialResponsePacket), SASLResponse(RegularPacket), SimpleQuery(RegularPacket), + CancelRequest(CancelRequestMessage), Terminate(TerminationMessage), + UnknownMessageType(RegularPacket), } impl PgsqlFEMessage { @@ -331,7 +343,9 @@ impl PgsqlFEMessage { PgsqlFEMessage::SASLInitialResponse(_) => "sasl_initial_response", PgsqlFEMessage::SASLResponse(_) => "sasl_response", PgsqlFEMessage::SimpleQuery(_) => "simple_query", + PgsqlFEMessage::CancelRequest(_) => "cancel_request", PgsqlFEMessage::Terminate(_) => "termination_message", + PgsqlFEMessage::UnknownMessageType(_) => "unknown_message_type", } } } @@ -562,7 +576,7 @@ fn parse_sasl_initial_response_payload(i: &[u8]) -> IResult<&[u8], (SASLAuthenti pub fn parse_sasl_initial_response(i: &[u8]) -> IResult<&[u8], PgsqlFEMessage> { let (i, identifier) = verify(be_u8, |&x| x == b'p')(i)?; - let (i, length) = verify(be_u32, |&x| x > PGSQL_LENGTH_FIELD)(i)?; + let (i, length) = parse_length(i)?; let (i, payload) = map_parser(take(length - PGSQL_LENGTH_FIELD), parse_sasl_initial_response_payload)(i)?; Ok((i, PgsqlFEMessage::SASLInitialResponse( SASLInitialResponsePacket { @@ -576,7 +590,7 @@ pub fn parse_sasl_initial_response(i: &[u8]) -> IResult<&[u8], PgsqlFEMessage> { pub fn parse_sasl_response(i: &[u8]) -> IResult<&[u8], PgsqlFEMessage> { let (i, identifier) = verify(be_u8, |&x| x == b'p')(i)?; - let (i, length) = verify(be_u32, |&x| x > PGSQL_LENGTH_FIELD)(i)?; + let (i, length) = parse_length(i)?; let (i, payload) = take(length - PGSQL_LENGTH_FIELD)(i)?; let resp = PgsqlFEMessage::SASLResponse( RegularPacket { @@ -605,16 +619,20 @@ pub fn pgsql_parse_startup_packet(i: &[u8]) -> IResult<&[u8], PgsqlFEMessage> { 
}, PGSQL_DUMMY_PROTO_MAJOR => { let (b, proto_major) = be_u16(b)?; - let (b, proto_minor) = all_consuming(be_u16)(b)?; - let _message = match proto_minor { - PGSQL_DUMMY_PROTO_MINOR_SSL => (len, proto_major, proto_minor), + let (b, proto_minor) = be_u16(b)?; + let (b, message) = match proto_minor { + PGSQL_DUMMY_PROTO_CANCEL_REQUEST => { + parse_cancel_request(b)? + }, + PGSQL_DUMMY_PROTO_MINOR_SSL => (b, PgsqlFEMessage::SSLRequest(DummyStartupPacket{ + length: len, + proto_major, + proto_minor + })), _ => return Err(Err::Error(make_error(b, ErrorKind::Switch))), }; - (b, PgsqlFEMessage::SSLRequest(DummyStartupPacket{ - length: len, - proto_major, - proto_minor})) + (b, message) } _ => return Err(Err::Error(make_error(b, ErrorKind::Switch))), }; @@ -636,7 +654,7 @@ pub fn pgsql_parse_startup_packet(i: &[u8]) -> IResult<&[u8], PgsqlFEMessage> { // Password can be encrypted or in cleartext pub fn parse_password_message(i: &[u8]) -> IResult<&[u8], PgsqlFEMessage> { let (i, identifier) = verify(be_u8, |&x| x == b'p')(i)?; - let (i, length) = verify(be_u32, |&x| x >= PGSQL_LENGTH_FIELD)(i)?; + let (i, length) = parse_length(i)?; let (i, password) = map_parser( take(length - PGSQL_LENGTH_FIELD), take_until1("\x00") @@ -651,7 +669,7 @@ pub fn parse_password_message(i: &[u8]) -> IResult<&[u8], PgsqlFEMessage> { fn parse_simple_query(i: &[u8]) -> IResult<&[u8], PgsqlFEMessage> { let (i, identifier) = verify(be_u8, |&x| x == b'Q')(i)?; - let (i, length) = verify(be_u32, |&x| x > PGSQL_LENGTH_FIELD)(i)?; + let (i, length) = parse_length(i)?; let (i, query) = map_parser(take(length - PGSQL_LENGTH_FIELD), take_until1("\x00"))(i)?; Ok((i, PgsqlFEMessage::SimpleQuery(RegularPacket { identifier, @@ -660,9 +678,18 @@ fn parse_simple_query(i: &[u8]) -> IResult<&[u8], PgsqlFEMessage> { }))) } +fn parse_cancel_request(i: &[u8]) -> IResult<&[u8], PgsqlFEMessage> { + let (i, pid) = be_u32(i)?; + let (i, backend_key) = be_u32(i)?; + Ok((i, 
PgsqlFEMessage::CancelRequest(CancelRequestMessage { + pid, + backend_key, + }))) +} + fn parse_terminate_message(i: &[u8]) -> IResult<&[u8], PgsqlFEMessage> { let (i, identifier) = verify(be_u8, |&x| x == b'X')(i)?; - let (i, length) = verify(be_u32, |&x| x == PGSQL_LENGTH_FIELD)(i)?; + let (i, length) = parse_length(i)?; Ok((i, PgsqlFEMessage::Terminate(TerminationMessage { identifier, length }))) } @@ -673,7 +700,17 @@ pub fn parse_request(i: &[u8]) -> IResult<&[u8], PgsqlFEMessage> { b'\0' => pgsql_parse_startup_packet(i)?, b'Q' => parse_simple_query(i)?, b'X' => parse_terminate_message(i)?, - _ => return Err(Err::Error(make_error(i, ErrorKind::Switch))), + _ => { + let (i, identifier) = be_u8(i)?; + let (i, length) = verify(be_u32, |&x| x > PGSQL_LENGTH_FIELD)(i)?; + let (i, payload) = take(length - PGSQL_LENGTH_FIELD)(i)?; + let unknown = PgsqlFEMessage::UnknownMessageType (RegularPacket{ + identifier, + length, + payload: payload.to_vec(), + }); + (i, unknown) + } }; Ok((i, message)) } @@ -682,7 +719,6 @@ fn pgsql_parse_authentication_message<'a>(i: &'a [u8]) -> IResult<&'a [u8], Pgsq let (i, identifier) = verify(be_u8, |&x| x == b'R')(i)?; let (i, length) = verify(be_u32, |&x| x >= 8)(i)?; let (i, auth_type) = be_u32(i)?; - let (i, payload) = peek(rest)(i)?; let (i, message) = map_parser( take(length - 8), |b: &'a [u8]| { @@ -692,14 +728,14 @@ fn pgsql_parse_authentication_message<'a>(i: &'a [u8]) -> IResult<&'a [u8], Pgsq identifier, length, auth_type, - payload: payload.to_vec(), + payload: b.to_vec(), }))), 3 => Ok((b, PgsqlBEMessage::AuthenticationCleartextPassword( AuthenticationMessage { identifier, length, auth_type, - payload: payload.to_vec(), + payload: b.to_vec(), }))), 5 => { let (b, salt) = all_consuming(take(4_usize))(b)?; @@ -716,7 +752,7 @@ fn pgsql_parse_authentication_message<'a>(i: &'a [u8]) -> IResult<&'a [u8], Pgsq identifier, length, auth_type, - payload: payload.to_vec(), + payload: b.to_vec(), }))), // TODO - For SASL, should we 
parse specific details of the challenge itself? (as seen in: https://github.com/launchbadge/sqlx/blob/master/sqlx-core/src/postgres/message/authentication.rs ) 10 => { @@ -730,23 +766,21 @@ fn pgsql_parse_authentication_message<'a>(i: &'a [u8]) -> IResult<&'a [u8], Pgsq }))) } 11 => { - let (b, sasl_challenge) = rest(i)?; Ok((b, PgsqlBEMessage::AuthenticationSASLContinue( AuthenticationMessage { identifier, length, auth_type, - payload: sasl_challenge.to_vec(), + payload: b.to_vec(), }))) }, 12 => { - let (i, signature) = take(length - 8)(i)?; - Ok((i, PgsqlBEMessage::AuthenticationSASLFinal( + Ok((b, PgsqlBEMessage::AuthenticationSASLFinal( AuthenticationMessage { identifier, length, auth_type, - payload: signature.to_vec(), + payload: b.to_vec(), } ))) } @@ -760,7 +794,7 @@ fn pgsql_parse_authentication_message<'a>(i: &'a [u8]) -> IResult<&'a [u8], Pgsq fn parse_parameter_status_message(i: &[u8]) -> IResult<&[u8], PgsqlBEMessage> { let (i, identifier) = verify(be_u8, |&x| x == b'S')(i)?; - let (i, length) = verify(be_u32, |&x| x >= PGSQL_LENGTH_FIELD)(i)?; + let (i, length) = parse_length(i)?; let (i, param) = map_parser(take(length - PGSQL_LENGTH_FIELD), pgsql_parse_generic_parameter)(i)?; Ok((i, PgsqlBEMessage::ParameterStatus(ParameterStatusMessage { identifier, @@ -791,7 +825,7 @@ fn parse_backend_key_data_message(i: &[u8]) -> IResult<&[u8], PgsqlBEMessage> { fn parse_command_complete(i: &[u8]) -> IResult<&[u8], PgsqlBEMessage> { let (i, identifier) = verify(be_u8, |&x| x == b'C')(i)?; - let (i, length) = verify(be_u32, |&x| x > PGSQL_LENGTH_FIELD)(i)?; + let (i, length) = parse_length(i)?; let (i, payload) = map_parser(take(length - PGSQL_LENGTH_FIELD), take_until("\x00"))(i)?; Ok((i, PgsqlBEMessage::CommandComplete(RegularPacket { identifier, @@ -886,7 +920,6 @@ pub fn parse_consolidated_data_row(i: &[u8]) -> IResult<&[u8], PgsqlBEMessage> { Ok((i, PgsqlBEMessage::ConsolidatedDataRow( ConsolidatedDataRowPacket { identifier, - length, row_cnt: 1, data_size: 
add_up_data_size(rows), } @@ -1044,12 +1077,13 @@ pub fn pgsql_parse_response(i: &[u8]) -> IResult<&[u8], PgsqlBEMessage> { b'T' => parse_row_description(i)?, b'A' => parse_notification_response(i)?, b'D' => parse_consolidated_data_row(i)?, - // _ => return Err(Err::Error(make_error(i, ErrorKind::Switch))), _ => { - let (i, payload) = rest(i)?; + let (i, identifier) = be_u8(i)?; + let (i, length) = verify(be_u32, |&x| x > PGSQL_LENGTH_FIELD)(i)?; + let (i, payload) = take(length - PGSQL_LENGTH_FIELD)(i)?; let unknown = PgsqlBEMessage::UnknownMessageType (RegularPacket{ - identifier: pseudo_header.0, - length: pseudo_header.1, + identifier, + length, payload: payload.to_vec(), }); (i, unknown) @@ -1247,9 +1281,37 @@ mod tests { let result = parse_request(&buf[0..3]); assert!(result.is_err()); - // TODO add other messages } + #[test] + fn test_cancel_request_message() { + // A cancel request message + let buf: &[u8] = &[ + 0x00, 0x00, 0x00, 0x10, // length: 16 (fixed) + 0x04, 0xd2, 0x16, 0x2e, // 1234.5678 - identifies a cancel request + 0x00, 0x00, 0x76, 0x31, // PID: 30257 + 0x23, 0x84, 0xf7, 0x2d]; // Backend key: 595916589 + let result = parse_cancel_request(buf); + assert!(result.is_ok()); + + let result = parse_cancel_request(&buf[0..3]); + assert!(result.is_err()); + + let result = pgsql_parse_startup_packet(buf); + assert!(result.is_ok()); + + let fail_result = pgsql_parse_startup_packet(&buf[0..3]); + assert!(fail_result.is_err()); + + let result = parse_request(buf); + assert!(result.is_ok()); + + let fail_result = parse_request(&buf[0..3]); + assert!(fail_result.is_err()); + } + + + #[test] fn test_parse_error_response_code() { let buf: &[u8] = &[0x43, 0x32, 0x38, 0x30, 0x30, 0x30, 0x00]; @@ -1859,7 +1921,7 @@ mod tests { let res = PgsqlBEMessage::UnknownMessageType(RegularPacket { identifier: b'`', length: 54, - payload: bad_buf.to_vec(), + payload: bad_buf[5..].to_vec(), }); assert_eq!(result, res); assert!(remainder.is_empty()); diff --git 
a/rust/src/pgsql/pgsql.rs b/rust/src/pgsql/pgsql.rs index f5fbebc8f950..5c46008c379c 100644 --- a/rust/src/pgsql/pgsql.rs +++ b/rust/src/pgsql/pgsql.rs @@ -50,7 +50,7 @@ pub struct PgsqlTransaction { pub request: Option, pub responses: Vec, - pub data_row_cnt: u16, + pub data_row_cnt: u64, pub data_size: u64, tx_data: AppLayerTxData, @@ -82,10 +82,10 @@ impl PgsqlTransaction { } pub fn incr_row_cnt(&mut self) { - self.data_row_cnt += 1; + self.data_row_cnt = self.data_row_cnt.saturating_add(1); } - pub fn get_row_cnt(&self) -> u16 { + pub fn get_row_cnt(&self) -> u64 { self.data_row_cnt } @@ -117,6 +117,7 @@ pub enum PgsqlStateProgress { DataRowReceived, CommandCompletedReceived, ErrorMessageReceived, + CancelRequestReceived, ConnectionTerminated, #[cfg(test)] UnknownState, @@ -151,7 +152,7 @@ impl Default for PgsqlState { Self::new() } } - + impl PgsqlState { pub fn new() -> Self { Self { @@ -229,6 +230,7 @@ impl PgsqlState { || self.state_progress == PgsqlStateProgress::SimpleQueryReceived || self.state_progress == PgsqlStateProgress::SSLRequestReceived || self.state_progress == PgsqlStateProgress::ConnectionTerminated + || self.state_progress == PgsqlStateProgress::CancelRequestReceived { let tx = self.new_tx(); self.transactions.push_back(tx); @@ -280,10 +282,16 @@ impl PgsqlState { // Important to keep in mind that: "In simple Query mode, the format of retrieved values is always text, except when the given command is a FETCH from a cursor declared with the BINARY option. In that case, the retrieved values are in binary format. The format codes given in the RowDescription message tell which format is being used." 
(from pgsql official documentation) } + PgsqlFEMessage::CancelRequest(_) => Some(PgsqlStateProgress::CancelRequestReceived), PgsqlFEMessage::Terminate(_) => { SCLogDebug!("Match: Terminate message"); Some(PgsqlStateProgress::ConnectionTerminated) } + PgsqlFEMessage::UnknownMessageType(_) => { + SCLogDebug!("Match: Unknown message type"); + // Not changing state when we don't know the message + None + } } } @@ -313,7 +321,7 @@ impl PgsqlState { // If there was gap, check we can sync up again. if self.request_gap { - if !probe_ts(input) { + if parser::parse_request(input).is_ok() { // The parser now needs to decide what to do as we are not in sync. // For now, we'll just try again next time. SCLogDebug!("Suricata interprets there's a gap in the request"); @@ -479,7 +487,6 @@ impl PgsqlState { let dummy_resp = PgsqlBEMessage::ConsolidatedDataRow(ConsolidatedDataRowPacket { identifier: b'D', - length: tx.get_row_cnt() as u32, // TODO this is ugly. We can probably get rid of `length` field altogether... row_cnt: tx.get_row_cnt(), data_size: tx.data_size, // total byte count of all data_row messages combined }); @@ -527,14 +534,6 @@ impl PgsqlState { } } -/// Probe for a valid PostgreSQL request -/// -/// PGSQL messages don't have a header per se, so we parse the slice for an ok() -fn probe_ts(input: &[u8]) -> bool { - SCLogDebug!("We are in probe_ts"); - parser::parse_request(input).is_ok() -} - /// Probe for a valid PostgreSQL response /// /// Currently, for parser usage only. 
We have a bit more logic in the function @@ -558,8 +557,20 @@ pub unsafe extern "C" fn rs_pgsql_probing_parser_ts( if input_len >= 1 && !input.is_null() { let slice: &[u8] = build_slice!(input, input_len as usize); - if probe_ts(slice) { - return ALPROTO_PGSQL; + + match parser::parse_request(slice) { + Ok((_, request)) => { + if let PgsqlFEMessage::UnknownMessageType(_) = request { + return ALPROTO_FAILED; + } + return ALPROTO_PGSQL; + } + Err(Err::Incomplete(_)) => { + return ALPROTO_UNKNOWN; + } + Err(_e) => { + return ALPROTO_FAILED; + } } } return ALPROTO_UNKNOWN; @@ -579,7 +590,10 @@ pub unsafe extern "C" fn rs_pgsql_probing_parser_tc( } match parser::pgsql_parse_response(slice) { - Ok((_, _response)) => { + Ok((_, response)) => { + if let PgsqlBEMessage::UnknownMessageType(_) = response { + return ALPROTO_FAILED; + } return ALPROTO_PGSQL; } Err(Err::Incomplete(_)) => { @@ -781,37 +795,6 @@ pub unsafe extern "C" fn rs_pgsql_register_parser() { mod test { use super::*; - #[test] - fn test_request_probe() { - // An SSL Request - let buf: &[u8] = &[0x00, 0x00, 0x00, 0x08, 0x04, 0xd2, 0x16, 0x2f]; - assert!(probe_ts(buf)); - - // incomplete messages, probe must return false - assert!(!probe_ts(&buf[0..6])); - assert!(!probe_ts(&buf[0..3])); - - // length is wrong (7), probe must return false - let buf: &[u8] = &[0x00, 0x00, 0x00, 0x07, 0x04, 0xd2, 0x16, 0x2f]; - assert!(!probe_ts(buf)); - - // A valid startup message/request - let buf: &[u8] = &[ - 0x00, 0x00, 0x00, 0x26, 0x00, 0x03, 0x00, 0x00, 0x75, 0x73, 0x65, 0x72, 0x00, 0x6f, - 0x72, 0x79, 0x78, 0x00, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x00, 0x6d, - 0x61, 0x69, 0x6c, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x00, 0x00, - ]; - assert!(probe_ts(buf)); - - // A non valid startup message/request (length is shorter by one. Would `exact!` help?) 
- let buf: &[u8] = &[ - 0x00, 0x00, 0x00, 0x25, 0x00, 0x03, 0x00, 0x00, 0x75, 0x73, 0x65, 0x72, 0x00, 0x6f, - 0x72, 0x79, 0x78, 0x00, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x00, 0x6d, - 0x61, 0x69, 0x6c, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x00, 0x00, - ]; - assert!(!probe_ts(buf)); - } - #[test] fn test_response_probe() { /* Authentication Request MD5 password salt value f211a3ed */ diff --git a/rust/src/quic/detect.rs b/rust/src/quic/detect.rs index 7e9019bef004..cd88120646ab 100644 --- a/rust/src/quic/detect.rs +++ b/rust/src/quic/detect.rs @@ -63,6 +63,21 @@ pub unsafe extern "C" fn rs_quic_tx_get_ja3( } } +#[no_mangle] +pub unsafe extern "C" fn rs_quic_tx_get_ja4( + tx: &QuicTransaction, buffer: *mut *const u8, buffer_len: *mut u32, +) -> u8 { + if let Some(ja4) = &tx.ja4 { + *buffer = ja4.as_ptr(); + *buffer_len = ja4.len() as u32; + 1 + } else { + *buffer = ptr::null(); + *buffer_len = 0; + 0 + } +} + #[no_mangle] pub unsafe extern "C" fn rs_quic_tx_get_version( tx: &QuicTransaction, buffer: *mut *const u8, buffer_len: *mut u32, diff --git a/rust/src/quic/frames.rs b/rust/src/quic/frames.rs index e1fb7d080727..0266850fd38a 100644 --- a/rust/src/quic/frames.rs +++ b/rust/src/quic/frames.rs @@ -16,6 +16,7 @@ */ use super::error::QuicError; +use crate::ja4::*; use crate::quic::parser::quic_var_uint; use nom7::bytes::complete::take; use nom7::combinator::{all_consuming, complete}; @@ -136,7 +137,8 @@ pub(crate) struct Crypto { // We remap the Vec from tls_parser::parse_tls_extensions because of // the lifetime of TlsExtension due to references to the slice used for parsing pub extv: Vec, - pub ja3: String, + pub ja3: Option, + pub ja4: Option, } #[derive(Debug, PartialEq)] @@ -235,7 +237,7 @@ fn quic_tls_ja3_client_extends(ja3: &mut String, exts: Vec) { // get interesting stuff out of parsed tls extensions fn quic_get_tls_extensions( - input: Option<&[u8]>, ja3: &mut String, client: bool, + input: Option<&[u8]>, ja3: &mut String, mut ja4: Option<&mut JA4>, 
client: bool, ) -> Vec { let mut extv = Vec::new(); if let Some(extr) = input { @@ -249,8 +251,21 @@ fn quic_get_tls_extensions( dash = true; } ja3.push_str(&u16::from(etype).to_string()); + if let Some(ref mut ja4) = ja4 { + ja4.add_extension(etype) + } let mut values = Vec::new(); match e { + TlsExtension::SupportedVersions(x) => { + for version in x { + let mut value = Vec::new(); + value.extend_from_slice(version.to_string().as_bytes()); + values.push(value); + if let Some(ref mut ja4) = ja4 { + ja4.set_tls_version(*version); + } + } + } TlsExtension::SNI(x) => { for sni in x { let mut value = Vec::new(); @@ -258,7 +273,22 @@ fn quic_get_tls_extensions( values.push(value); } } + TlsExtension::SignatureAlgorithms(x) => { + for sigalgo in x { + let mut value = Vec::new(); + value.extend_from_slice(sigalgo.to_string().as_bytes()); + values.push(value); + if let Some(ref mut ja4) = ja4 { + ja4.add_signature_algorithm(*sigalgo) + } + } + } TlsExtension::ALPN(x) => { + if !x.is_empty() { + if let Some(ref mut ja4) = ja4 { + ja4.set_alpn(x[0]); + } + } for alpn in x { let mut value = Vec::new(); value.extend_from_slice(alpn); @@ -284,6 +314,8 @@ fn parse_quic_handshake(msg: TlsMessage) -> Option { let mut ja3 = String::with_capacity(256); ja3.push_str(&u16::from(ch.version).to_string()); ja3.push(','); + let mut ja4 = JA4::new(); + ja4.set_quic(); let mut dash = false; for c in &ch.ciphers { if dash { @@ -292,11 +324,25 @@ fn parse_quic_handshake(msg: TlsMessage) -> Option { dash = true; } ja3.push_str(&u16::from(*c).to_string()); + ja4.add_cipher_suite(*c); } ja3.push(','); let ciphers = ch.ciphers; - let extv = quic_get_tls_extensions(ch.ext, &mut ja3, true); - return Some(Frame::Crypto(Crypto { ciphers, extv, ja3 })); + let extv = quic_get_tls_extensions(ch.ext, &mut ja3, Some(&mut ja4), true); + return Some(Frame::Crypto(Crypto { + ciphers, + extv, + ja3: if cfg!(feature = "ja3") { + Some(ja3) + } else { + None + }, + ja4: if cfg!(feature = "ja4") { + Some(ja4) + 
} else { + None + }, + })); } ServerHello(sh) => { let mut ja3 = String::with_capacity(256); @@ -305,8 +351,17 @@ fn parse_quic_handshake(msg: TlsMessage) -> Option { ja3.push_str(&u16::from(sh.cipher).to_string()); ja3.push(','); let ciphers = vec![sh.cipher]; - let extv = quic_get_tls_extensions(sh.ext, &mut ja3, false); - return Some(Frame::Crypto(Crypto { ciphers, extv, ja3 })); + let extv = quic_get_tls_extensions(sh.ext, &mut ja3, None, false); + return Some(Frame::Crypto(Crypto { + ciphers, + extv, + ja3: if cfg!(feature = "ja3") { + Some(ja3) + } else { + None + }, + ja4: None, + })); } _ => {} } @@ -504,8 +559,7 @@ impl Frame { let mut d = vec![0; crypto_max_size as usize]; for f in &frames { if let Frame::CryptoFrag(c) = f { - d[c.offset as usize..(c.offset + c.length) as usize] - .clone_from_slice(&c.data); + d[c.offset as usize..(c.offset + c.length) as usize].clone_from_slice(&c.data); } } if let Ok((_, msg)) = parse_tls_message_handshake(&d) { diff --git a/rust/src/quic/logger.rs b/rust/src/quic/logger.rs index e03ebdd6bf21..0acc9841e706 100644 --- a/rust/src/quic/logger.rs +++ b/rust/src/quic/logger.rs @@ -88,7 +88,7 @@ fn quic_tls_extension_name(e: u16) -> Option { } } -fn log_template(tx: &QuicTransaction, js: &mut JsonBuilder) -> Result<(), JsonError> { +fn log_template(tx: &QuicTransaction, log_ja4: bool, js: &mut JsonBuilder) -> Result<(), JsonError> { js.open_object("quic")?; if tx.header.ty != QuicType::Short { js.set_string("version", String::from(tx.header.version).as_str())?; @@ -122,6 +122,13 @@ fn log_template(tx: &QuicTransaction, js: &mut JsonBuilder) -> Result<(), JsonEr js.set_string("string", ja3)?; js.close()?; } + + if log_ja4 { + if let Some(ref ja4) = &tx.ja4 { + js.set_string("ja4", ja4)?; + } + } + if !tx.extv.is_empty() { js.open_array("extensions")?; for e in &tx.extv { @@ -150,8 +157,8 @@ fn log_template(tx: &QuicTransaction, js: &mut JsonBuilder) -> Result<(), JsonEr #[no_mangle] pub unsafe extern "C" fn rs_quic_to_json( - 
tx: *mut std::os::raw::c_void, js: &mut JsonBuilder, + tx: *mut std::os::raw::c_void, log_ja4: bool, js: &mut JsonBuilder, ) -> bool { let tx = cast_pointer!(tx, QuicTransaction); - log_template(tx, js).is_ok() + log_template(tx, log_ja4, js).is_ok() } diff --git a/rust/src/quic/quic.rs b/rust/src/quic/quic.rs index 8e3ea6f35a21..d0bff24cec67 100644 --- a/rust/src/quic/quic.rs +++ b/rust/src/quic/quic.rs @@ -22,7 +22,7 @@ use super::{ parser::{quic_pkt_num, QuicData, QuicHeader, QuicType}, }; use crate::applayer::{self, *}; -use crate::core::{AppProto, Flow, ALPROTO_FAILED, ALPROTO_UNKNOWN, IPPROTO_UDP, Direction}; +use crate::core::{AppProto, Direction, Flow, ALPROTO_FAILED, ALPROTO_UNKNOWN, IPPROTO_UDP}; use std::collections::VecDeque; use std::ffi::CString; use tls_parser::TlsExtensionType; @@ -48,6 +48,7 @@ pub struct QuicTransaction { pub ua: Option>, pub extv: Vec, pub ja3: Option, + pub ja4: Option, pub client: bool, tx_data: AppLayerTxData, } @@ -55,9 +56,13 @@ pub struct QuicTransaction { impl QuicTransaction { fn new( header: QuicHeader, data: QuicData, sni: Option>, ua: Option>, - extv: Vec, ja3: Option, client: bool, + extv: Vec, ja3: Option, ja4: Option, client: bool, ) -> Self { - let direction = if client { Direction::ToServer } else { Direction::ToClient }; + let direction = if client { + Direction::ToServer + } else { + Direction::ToClient + }; let cyu = Cyu::generate(&header, &data.frames); QuicTransaction { tx_id: 0, @@ -67,13 +72,18 @@ impl QuicTransaction { ua, extv, ja3, + ja4, client, tx_data: AppLayerTxData::for_direction(direction), } } fn new_empty(client: bool, header: QuicHeader) -> Self { - let direction = if client { Direction::ToServer } else { Direction::ToClient }; + let direction = if client { + Direction::ToServer + } else { + Direction::ToClient + }; QuicTransaction { tx_id: 0, header, @@ -82,6 +92,7 @@ impl QuicTransaction { ua: None, extv: Vec::new(), ja3: None, + ja4: None, client, tx_data: 
AppLayerTxData::for_direction(direction), } @@ -132,9 +143,9 @@ impl QuicState { fn new_tx( &mut self, header: QuicHeader, data: QuicData, sni: Option>, ua: Option>, - extb: Vec, ja3: Option, client: bool, + extb: Vec, ja3: Option, ja4: Option, client: bool, ) { - let mut tx = QuicTransaction::new(header, data, sni, ua, extb, ja3, client); + let mut tx = QuicTransaction::new(header, data, sni, ua, extb, ja3, ja4, client); self.max_tx_id += 1; tx.tx_id = self.max_tx_id; self.transactions.push_back(tx); @@ -212,6 +223,7 @@ impl QuicState { let mut sni: Option> = None; let mut ua: Option> = None; let mut ja3: Option = None; + let mut ja4: Option = None; let mut extv: Vec = Vec::new(); for frame in &data.frames { match frame { @@ -230,7 +242,17 @@ impl QuicState { } } Frame::Crypto(c) => { - ja3 = Some(c.ja3.clone()); + if let Some(ja3str) = &c.ja3 { + ja3 = Some(ja3str.clone()); + } + // we only do client fingerprints for now + if to_server { + // our hash is complete, let's only use strings from + // now on + if let Some(ref rja4) = c.ja4 { + ja4 = Some(rja4.get_hash()); + } + } for e in &c.extv { if e.etype == TlsExtensionType::ServerName && !e.values.is_empty() { sni = Some(e.values[0].to_vec()); @@ -246,7 +268,7 @@ impl QuicState { _ => {} } } - self.new_tx(header, data, sni, ua, extv, ja3, to_server); + self.new_tx(header, data, sni, ua, extv, ja3, ja4, to_server); } fn set_event_notx(&mut self, event: QuicEvent, header: QuicHeader, client: bool) { @@ -303,6 +325,7 @@ impl QuicState { None, Vec::new(), None, + None, to_server, ); continue; @@ -353,6 +376,9 @@ pub unsafe extern "C" fn rs_quic_state_tx_free(state: *mut std::os::raw::c_void, pub unsafe extern "C" fn rs_quic_probing_parser( _flow: *const Flow, _direction: u8, input: *const u8, input_len: u32, _rdir: *mut u8, ) -> AppProto { + if input.is_null() { + return ALPROTO_UNKNOWN; + } let slice = build_slice!(input, input_len as usize); if QuicHeader::from_bytes(slice, DEFAULT_DCID_LEN).is_ok() { diff --git 
a/rust/src/smb/dcerpc.rs b/rust/src/smb/dcerpc.rs index b4c5749eac46..de6b8def73b3 100644 --- a/rust/src/smb/dcerpc.rs +++ b/rust/src/smb/dcerpc.rs @@ -455,10 +455,7 @@ pub fn smb_read_dcerpc_record(state: &mut SMBState, // msg_id 0 as this data crosses cmd/reply pairs let ehdr = SMBHashKeyHdrGuid::new(SMBCommonHdr::new(SMBHDR_TYPE_TRANS_FRAG, hdr.ssn_id, hdr.tree_id, 0_u64), guid.to_vec()); - let mut prevdata = match state.ssnguid2vec_map.remove(&ehdr) { - Some(s) => s, - None => Vec::new(), - }; + let mut prevdata = state.ssnguid2vec_map.remove(&ehdr).unwrap_or_default(); SCLogDebug!("indata {} prevdata {}", indata.len(), prevdata.len()); prevdata.extend_from_slice(indata); let data = prevdata; diff --git a/rust/src/smb/log.rs b/rust/src/smb/log.rs index 84965749ba17..e242d02e486b 100644 --- a/rust/src/smb/log.rs +++ b/rust/src/smb/log.rs @@ -38,7 +38,7 @@ fn debug_add_progress(jsb: &mut JsonBuilder, tx: &SMBTransaction) -> Result<(), /// take in a file GUID (16 bytes) or FID (2 bytes). 
Also deal /// with our frankenFID (2 bytes + 4 user_id) -fn fuid_to_string(fuid: &Vec) -> String { +fn fuid_to_string(fuid: &[u8]) -> String { let fuid_len = fuid.len(); if fuid_len == 16 { guid_to_string(fuid) @@ -52,7 +52,7 @@ fn fuid_to_string(fuid: &Vec) -> String { } } -fn guid_to_string(guid: &Vec) -> String { +fn guid_to_string(guid: &[u8]) -> String { if guid.len() == 16 { let output = format!("{:02x}{:02x}{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}", guid[3], guid[2], guid[1], guid[0], diff --git a/rust/src/smb/ntlmssp_records.rs b/rust/src/smb/ntlmssp_records.rs index d9346294a505..509926cc8476 100644 --- a/rust/src/smb/ntlmssp_records.rs +++ b/rust/src/smb/ntlmssp_records.rs @@ -1,4 +1,4 @@ -/* Copyright (C) 2017-2022 Open Information Security Foundation +/* Copyright (C) 2017-2024 Open Information Security Foundation * * You can copy, redistribute or modify this Program under the terms of * the GNU General Public License version 2 as published by the Free @@ -100,32 +100,44 @@ pub fn parse_ntlm_auth_record(i: &[u8]) -> IResult<&[u8], NTLMSSPAuthRecord> { let orig_i = i; let record_len = i.len() + NTLMSSP_IDTYPE_LEN; // identifier (8) and type (4) are cut before we are called + // track start of the data offset let (i, _lm_blob_len) = verify(le_u16, |&v| (v as usize) < record_len)(i)?; let (i, _lm_blob_maxlen) = le_u16(i)?; - let (i, _lm_blob_offset) = verify(le_u32, |&v| (v as usize) < record_len)(i)?; + let (i, lm_blob_offset) = verify(le_u32, |&v| (v as usize) < record_len)(i)?; + let mut data_start = lm_blob_offset; let (i, _ntlmresp_blob_len) = verify(le_u16, |&v| (v as usize) < record_len)(i)?; let (i, _ntlmresp_blob_maxlen) = le_u16(i)?; - let (i, _ntlmresp_blob_offset) = verify(le_u32, |&v| (v as usize) < record_len)(i)?; + let (i, ntlmresp_blob_offset) = verify(le_u32, |&v| (v as usize) < record_len)(i)?; + data_start = std::cmp::min(data_start, ntlmresp_blob_offset); let (i, domain_blob_len) = 
verify(le_u16, |&v| (v as usize) < record_len)(i)?; let (i, _domain_blob_maxlen) = le_u16(i)?; let (i, domain_blob_offset) = verify(le_u32, |&v| (v as usize) < record_len)(i)?; + data_start = std::cmp::min(data_start, domain_blob_offset); let (i, user_blob_len) = verify(le_u16, |&v| (v as usize) < record_len)(i)?; let (i, _user_blob_maxlen) = le_u16(i)?; let (i, user_blob_offset) = verify(le_u32, |&v| (v as usize) < record_len)(i)?; + data_start = std::cmp::min(data_start, user_blob_offset); let (i, host_blob_len) = verify(le_u16, |&v| (v as usize) < record_len)(i)?; let (i, _host_blob_maxlen) = le_u16(i)?; let (i, host_blob_offset) = verify(le_u32, |&v| (v as usize) < record_len)(i)?; + data_start = std::cmp::min(data_start, host_blob_offset); let (i, _ssnkey_blob_len) = verify(le_u16, |&v| (v as usize) < record_len)(i)?; let (i, _ssnkey_blob_maxlen) = le_u16(i)?; - let (i, _ssnkey_blob_offset) = verify(le_u32, |&v| (v as usize) < record_len)(i)?; + let (i, ssnkey_blob_offset) = verify(le_u32, |&v| (v as usize) < record_len)(i)?; + data_start = std::cmp::min(data_start, ssnkey_blob_offset); let (i, nego_flags) = parse_ntlm_auth_nego_flags(i)?; - let (_, version) = cond(nego_flags.version, parse_ntlm_auth_version)(i)?; + + // Check if we have space for the version before the "data" starts. + let consumed = orig_i.len() - i.len() + NTLMSSP_IDTYPE_LEN; + let has_space_for_version = data_start as usize >= consumed + 8 && nego_flags.version; + + let (_, version) = cond(has_space_for_version, parse_ntlm_auth_version)(i)?; // Caller does not care about remaining input... 
let (_, domain_blob) = extract_ntlm_substring(orig_i, domain_blob_offset, domain_blob_len)?; diff --git a/rust/src/smb/smb.rs b/rust/src/smb/smb.rs index d6b0a565c060..e64e0a8e6f0b 100644 --- a/rust/src/smb/smb.rs +++ b/rust/src/smb/smb.rs @@ -1045,7 +1045,7 @@ impl SMBState { pub fn get_service_for_guid(&self, guid: &[u8]) -> (&'static str, bool) { - let (name, is_dcerpc) = match self.guid2name_map.get(&guid.to_vec()) { + let (name, is_dcerpc) = match self.guid2name_map.get(guid) { Some(n) => { let mut s = n.as_slice(); // skip leading \ if we have it @@ -2121,7 +2121,7 @@ pub unsafe extern "C" fn rs_smb_probe_begins_tcp(_f: *const Flow, flags: u8, input: *const u8, len: u32, rdir: *mut u8) -> AppProto { - if len < MIN_REC_SIZE as u32 { + if len < MIN_REC_SIZE as u32 || input.is_null() { return ALPROTO_UNKNOWN; } let slice = build_slice!(input, len as usize); @@ -2135,7 +2135,7 @@ pub unsafe extern "C" fn rs_smb_probe_tcp(_f: *const Flow, flags: u8, input: *const u8, len: u32, rdir: *mut u8) -> AppProto { - if len < MIN_REC_SIZE as u32 { + if len < MIN_REC_SIZE as u32 || input.is_null() { return ALPROTO_UNKNOWN; } let slice = build_slice!(input, len as usize); diff --git a/rust/src/smb/smb1.rs b/rust/src/smb/smb1.rs index 9d7d47e27c85..eb2712391150 100644 --- a/rust/src/smb/smb1.rs +++ b/rust/src/smb/smb1.rs @@ -482,7 +482,7 @@ fn smb1_request_record_one(state: &mut SMBState, r: &SmbRecord, command: u8, and state.ssn2vec_map.insert(name_key, name_val); let tx_hdr = SMBCommonHdr::from1(r, SMBHDR_TYPE_GENERICTX); - let tx = state.new_create_tx(&cr.file_name.to_vec(), + let tx = state.new_create_tx(&cr.file_name, cr.disposition, del, dir, tx_hdr); tx.vercmd.set_smb1_cmd(command); SCLogDebug!("TS CREATE TX {} created", tx.id); @@ -894,11 +894,8 @@ pub fn smb1_trans_response_record(state: &mut SMBState, r: &SmbRecord) SCLogDebug!("TRANS response {:?}", rd); // see if we have a stored fid - let fid = match state.ssn2vec_map.remove( - &SMBCommonHdr::from1(r, 
SMBHDR_TYPE_GUID)) { - Some(f) => f, - None => Vec::new(), - }; + let fid = state.ssn2vec_map.remove( + &SMBCommonHdr::from1(r, SMBHDR_TYPE_GUID)).unwrap_or_default(); SCLogDebug!("FID {:?}", fid); let mut frankenfid = fid.to_vec(); diff --git a/rust/src/snmp/snmp.rs b/rust/src/snmp/snmp.rs index a4481f4bc191..6d67eb49e31c 100644 --- a/rust/src/snmp/snmp.rs +++ b/rust/src/snmp/snmp.rs @@ -357,6 +357,9 @@ pub unsafe extern "C" fn rs_snmp_probing_parser(_flow: *const Flow, input:*const u8, input_len: u32, _rdir: *mut u8) -> AppProto { + if input.is_null() { + return ALPROTO_UNKNOWN; + } let slice = build_slice!(input,input_len as usize); let alproto = ALPROTO_SNMP; if slice.len() < 4 { return ALPROTO_FAILED; } diff --git a/rust/src/ssh/logger.rs b/rust/src/ssh/logger.rs index 9bc7d7c33f39..e83d288a79b0 100644 --- a/rust/src/ssh/logger.rs +++ b/rust/src/ssh/logger.rs @@ -15,7 +15,7 @@ * 02110-1301, USA. */ -use super::ssh::SSHTransaction; +use super::ssh::{SSHTransaction, SSH_MAX_BANNER_LEN}; use crate::jsonbuilder::{JsonBuilder, JsonError}; fn log_ssh(tx: &SSHTransaction, js: &mut JsonBuilder) -> Result { @@ -24,9 +24,9 @@ fn log_ssh(tx: &SSHTransaction, js: &mut JsonBuilder) -> Result } if !tx.cli_hdr.protover.is_empty() { js.open_object("client")?; - js.set_string_from_bytes("proto_version", &tx.cli_hdr.protover)?; + js.set_string_from_bytes_limited("proto_version", &tx.cli_hdr.protover, SSH_MAX_BANNER_LEN)?; if !tx.cli_hdr.swver.is_empty() { - js.set_string_from_bytes("software_version", &tx.cli_hdr.swver)?; + js.set_string_from_bytes_limited("software_version", &tx.cli_hdr.swver, SSH_MAX_BANNER_LEN)?; } if !tx.cli_hdr.hassh.is_empty() || !tx.cli_hdr.hassh_string.is_empty() { js.open_object("hassh")?; @@ -42,9 +42,9 @@ fn log_ssh(tx: &SSHTransaction, js: &mut JsonBuilder) -> Result } if !tx.srv_hdr.protover.is_empty() { js.open_object("server")?; - js.set_string_from_bytes("proto_version", &tx.srv_hdr.protover)?; + js.set_string_from_bytes_limited("proto_version", 
&tx.srv_hdr.protover, SSH_MAX_BANNER_LEN)?; if !tx.srv_hdr.swver.is_empty() { - js.set_string_from_bytes("software_version", &tx.srv_hdr.swver)?; + js.set_string_from_bytes_limited("software_version", &tx.srv_hdr.swver, SSH_MAX_BANNER_LEN)?; } if !tx.srv_hdr.hassh.is_empty() || !tx.srv_hdr.hassh_string.is_empty() { js.open_object("hassh")?; diff --git a/rust/src/ssh/parser.rs b/rust/src/ssh/parser.rs index bfad8c005a9d..0c57c88229d1 100644 --- a/rust/src/ssh/parser.rs +++ b/rust/src/ssh/parser.rs @@ -198,7 +198,7 @@ impl<'a> SshPacketKeyExchange<'a> { slices .iter() .for_each(|&x| hassh_string.extend_from_slice(x)); - hassh.extend(format!("{:x}", Md5::new().chain(&hassh_string).finalize()).as_bytes()); + hassh.extend(format!("{:x}", Md5::new().chain(hassh_string).finalize()).as_bytes()); } } @@ -552,11 +552,8 @@ mod tests { ,0x00 ,0x00 ,0x00 ,0x00 ,0x00 ,0x00 ,0x00 ,0x00]; let mut hassh_string: Vec = vec!(); let mut hassh: Vec = vec!(); - match ssh_parse_key_exchange(&client_key_exchange){ - Ok((_, key_exchange)) => { - key_exchange.generate_hassh(&mut hassh_string, &mut hassh, &true); - } - Err(_) => { } + if let Ok((_, key_exchange)) = ssh_parse_key_exchange(&client_key_exchange) { + key_exchange.generate_hassh(&mut hassh_string, &mut hassh, &true); } assert_eq!(hassh_string, "curve25519-sha256,curve25519-sha256@libssh.org,\ @@ -643,11 +640,8 @@ mod tests { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]; let mut hassh_server_string: Vec = vec!(); let mut hassh_server: Vec = vec!(); - match ssh_parse_key_exchange(&server_key_exchange){ - Ok((_, key_exchange)) => { - key_exchange.generate_hassh(&mut hassh_server_string, &mut hassh_server, &true); - } - Err(_) => { } + if let Ok((_, key_exchange)) = ssh_parse_key_exchange(&server_key_exchange) { + key_exchange.generate_hassh(&mut hassh_server_string, &mut hassh_server, &true); } assert_eq!(hassh_server, "b12d2871a1189eff20364cf5333619ee".as_bytes().to_vec()); } diff --git a/rust/src/ssh/ssh.rs b/rust/src/ssh/ssh.rs 
index 6280e0b6ace9..a0586894f9fb 100644 --- a/rust/src/ssh/ssh.rs +++ b/rust/src/ssh/ssh.rs @@ -46,7 +46,7 @@ pub enum SSHConnectionState { SshStateFinished = 3, } -const SSH_MAX_BANNER_LEN: usize = 256; +pub const SSH_MAX_BANNER_LEN: usize = 256; const SSH_RECORD_HEADER_LEN: usize = 6; const SSH_MAX_REASSEMBLED_RECORD_LEN: usize = 65535; @@ -256,7 +256,9 @@ impl SSHState { return r; } Err(Err::Incomplete(_)) => { - return AppLayerResult::incomplete(0_u32, (input.len() + 1) as u32); + // we do not need to retain these bytes + // we parsed them, we skip them + return AppLayerResult::ok(); } Err(_e) => { SCLogDebug!("SSH invalid banner {}", _e); diff --git a/scripts/clang-format.sh b/scripts/clang-format.sh index fe16db07af41..fc69e49dbf31 100755 --- a/scripts/clang-format.sh +++ b/scripts/clang-format.sh @@ -560,9 +560,13 @@ SetTopLevelDir RequireProgram GIT git # ubuntu uses clang-format-{version} name for newer versions. fedora not. -RequireProgram GIT_CLANG_FORMAT git-clang-format-11 git-clang-format-10 git-clang-format-9 git-clang-format +RequireProgram GIT_CLANG_FORMAT git-clang-format-14 git-clang-format-11 git-clang-format-10 git-clang-format-9 git-clang-format GIT_CLANG_FORMAT_BINARY=clang-format -if [[ $GIT_CLANG_FORMAT =~ .*git-clang-format-11$ ]]; then +if [[ $GIT_CLANG_FORMAT =~ .*git-clang-format-14$ ]]; then + # default binary is clang-format, specify the correct version. + # Alternative: git config clangformat.binary "clang-format-14" + GIT_CLANG_FORMAT_BINARY="clang-format-14" +elif [[ $GIT_CLANG_FORMAT =~ .*git-clang-format-11$ ]]; then # default binary is clang-format, specify the correct version. 
# Alternative: git config clangformat.binary "clang-format-11" GIT_CLANG_FORMAT_BINARY="clang-format-11" diff --git a/scripts/docs-almalinux9-minimal-build.sh b/scripts/docs-almalinux9-minimal-build.sh new file mode 100755 index 000000000000..2b569ff72dc9 --- /dev/null +++ b/scripts/docs-almalinux9-minimal-build.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +# Serves for RPM-based docs and is verified by Github Actions + +# install-guide-documentation tag start: Minimal RPM-based dependencies +sudo dnf install -y rustc cargo cbindgen +sudo dnf install -y gcc gcc-c++ jansson-devel libpcap-devel \ + libyaml-devel make pcre2-devel zlib-devel +# install-guide-documentation tag end: Minimal RPM-based dependencies \ No newline at end of file diff --git a/scripts/docs-ubuntu-debian-minimal-build.sh b/scripts/docs-ubuntu-debian-minimal-build.sh new file mode 100755 index 000000000000..41d163a94734 --- /dev/null +++ b/scripts/docs-ubuntu-debian-minimal-build.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +# Serves for Ubuntu/Debian docs and is verified by Github Actions + +# install-guide-documentation tag start: Minimal dependencies +sudo apt -y install autoconf automake build-essential cargo \ + cbindgen libjansson-dev libpcap-dev libpcre2-dev libtool \ + libyaml-dev make pkg-config rustc zlib1g-dev +# install-guide-documentation tag end: Minimal dependencies \ No newline at end of file diff --git a/src/Makefile.am b/src/Makefile.am index 48a5ce850ce2..8377d39e4819 100755 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -229,6 +229,7 @@ noinst_HEADERS = \ detect-ipv6hdr.h \ detect-isdataat.h \ detect-itype.h \ + detect-ja4-hash.h \ detect-krb5-cname.h \ detect-krb5-errcode.h \ detect-krb5-msgtype.h \ @@ -276,6 +277,7 @@ noinst_HEADERS = \ detect-rawbytes.h \ detect-reference.h \ detect-replace.h \ + detect-requires.h \ detect-rev.h \ detect-rfb-name.h \ detect-rfb-secresult.h \ @@ -337,12 +339,15 @@ noinst_HEADERS = \ detect-tls-version.h \ detect-tls-random.h \ detect-tos.h \ + 
detect-transform-casechange.h \ detect-transform-compress-whitespace.h \ detect-transform-dotprefix.h \ + detect-transform-header-lowercase.h \ detect-transform-md5.h \ detect-transform-pcrexform.h \ detect-transform-sha1.h \ detect-transform-sha256.h \ + detect-transform-strip-pseudo-headers.h \ detect-transform-strip-whitespace.h \ detect-transform-urldecode.h \ detect-transform-xor.h \ @@ -502,6 +507,7 @@ noinst_HEADERS = \ tm-threads-common.h \ tm-threads.h \ tree.h \ + interval-tree.h \ unix-manager.h \ util-action.h \ util-affinity.h \ @@ -547,10 +553,12 @@ noinst_HEADERS = \ util-hash-string.h \ util-host-info.h \ util-host-os-info.h \ + util-hugepages.h \ util-hyperscan.h \ util-ioctl.h \ util-ip.h \ util-ja3.h \ + util-ja4.h \ util-landlock.h \ util-logopenfile.h \ util-log-redis.h \ @@ -617,6 +625,7 @@ noinst_HEADERS = \ util-validate.h \ util-var.h \ util-var-name.h \ + util-port-interval-tree.h \ win32-misc.h \ win32-service.h \ win32-syscall.h \ @@ -838,6 +847,7 @@ libsuricata_c_a_SOURCES = \ detect-ipv6hdr.c \ detect-isdataat.c \ detect-itype.c \ + detect-ja4-hash.c \ detect-krb5-cname.c \ detect-krb5-errcode.c \ detect-krb5-msgtype.c \ @@ -885,6 +895,7 @@ libsuricata_c_a_SOURCES = \ detect-rawbytes.c \ detect-reference.c \ detect-replace.c \ + detect-requires.c \ detect-rev.c \ detect-rfb-name.c \ detect-rfb-secresult.c \ @@ -946,12 +957,15 @@ libsuricata_c_a_SOURCES = \ detect-tls-version.c \ detect-tls-random.c \ detect-tos.c \ + detect-transform-casechange.c \ detect-transform-compress-whitespace.c \ detect-transform-dotprefix.c \ + detect-transform-header-lowercase.c \ detect-transform-md5.c \ detect-transform-pcrexform.c \ detect-transform-sha1.c \ detect-transform-sha256.c \ + detect-transform-strip-pseudo-headers.c \ detect-transform-strip-whitespace.c \ detect-transform-urldecode.c \ detect-transform-xor.c \ @@ -1142,6 +1156,7 @@ libsuricata_c_a_SOURCES = \ util-hash-string.c \ util-host-info.c \ util-host-os-info.c \ + util-hugepages.c \ 
util-hyperscan.c \ util-ioctl.c \ util-ip.c \ @@ -1217,6 +1232,7 @@ libsuricata_c_a_SOURCES = \ util-unittest-helper.c \ util-var.c \ util-var-name.c \ + util-port-interval-tree.c \ win32-misc.c \ win32-service.c \ win32-syscall.c @@ -1271,7 +1287,8 @@ EXTRA_DIST = \ tests/detect-tls-version.c \ tests/detect-ipaddr.c \ tests/detect.c \ - tests/stream-tcp.c + tests/stream-tcp.c \ + tests/output-json-stats.c install-headers: mkdir -p $(DESTDIR)${includedir}/suricata @@ -1326,7 +1343,7 @@ if BUILD_FUZZTARGETS LDFLAGS_FUZZ = $(all_libraries) $(SECLDFLAGS) LDADD_FUZZ = libsuricata_c.a $(RUST_SURICATA_LIB) $(HTP_LDADD) $(RUST_LDADD) -fuzz_applayerprotodetectgetproto_SOURCES = tests/fuzz/fuzz_applayerprotodetectgetproto.c +fuzz_applayerprotodetectgetproto_SOURCES = tests/fuzz/fuzz_applayerprotodetectgetproto.c tests/fuzz/confyaml.c fuzz_applayerprotodetectgetproto_LDFLAGS = $(LDFLAGS_FUZZ) fuzz_applayerprotodetectgetproto_LDADD = $(LDADD_FUZZ) if HAS_FUZZLDFLAGS @@ -1337,7 +1354,7 @@ endif # force usage of CXX for linker nodist_EXTRA_fuzz_applayerprotodetectgetproto_SOURCES = force-cxx-linking.cxx -fuzz_applayerparserparse_SOURCES = tests/fuzz/fuzz_applayerparserparse.c +fuzz_applayerparserparse_SOURCES = tests/fuzz/fuzz_applayerparserparse.c tests/fuzz/confyaml.c fuzz_applayerparserparse_LDFLAGS = $(LDFLAGS_FUZZ) fuzz_applayerparserparse_LDADD = $(LDADD_FUZZ) if HAS_FUZZLDFLAGS @@ -1381,7 +1398,7 @@ endif # force usage of CXX for linker nodist_EXTRA_fuzz_decodepcapfile_SOURCES = force-cxx-linking.cxx -fuzz_sigpcap_SOURCES = tests/fuzz/fuzz_sigpcap.c +fuzz_sigpcap_SOURCES = tests/fuzz/fuzz_sigpcap.c tests/fuzz/confyaml.c fuzz_sigpcap_LDFLAGS = $(LDFLAGS_FUZZ) fuzz_sigpcap_LDADD = $(LDADD_FUZZ) if HAS_FUZZLDFLAGS @@ -1393,7 +1410,7 @@ endif nodist_EXTRA_fuzz_sigpcap_SOURCES = force-cxx-linking.cxx if HAS_FUZZPCAP -fuzz_sigpcap_aware_SOURCES = tests/fuzz/fuzz_sigpcap_aware.c +fuzz_sigpcap_aware_SOURCES = tests/fuzz/fuzz_sigpcap_aware.c tests/fuzz/confyaml.c 
fuzz_sigpcap_aware_LDFLAGS = $(LDFLAGS_FUZZ) fuzz_sigpcap_aware_LDADD = $(LDADD_FUZZ) -lfuzzpcap if HAS_FUZZLDFLAGS @@ -1404,7 +1421,7 @@ endif # force usage of CXX for linker nodist_EXTRA_fuzz_sigpcap_aware_SOURCES = force-cxx-linking.cxx -fuzz_predefpcap_aware_SOURCES = tests/fuzz/fuzz_predefpcap_aware.c +fuzz_predefpcap_aware_SOURCES = tests/fuzz/fuzz_predefpcap_aware.c tests/fuzz/confyaml.c fuzz_predefpcap_aware_LDFLAGS = $(LDFLAGS_FUZZ) fuzz_predefpcap_aware_LDADD = $(LDADD_FUZZ) -lfuzzpcap if HAS_FUZZLDFLAGS diff --git a/src/app-layer-ftp.c b/src/app-layer-ftp.c index 3db448279073..d1804c61e3ba 100644 --- a/src/app-layer-ftp.c +++ b/src/app-layer-ftp.c @@ -192,13 +192,17 @@ static int FTPCheckMemcap(uint64_t size) static void *FTPCalloc(size_t n, size_t size) { - if (FTPCheckMemcap((uint32_t)(n * size)) == 0) + if (FTPCheckMemcap((uint32_t)(n * size)) == 0) { + sc_errno = SC_ELIMIT; return NULL; + } void *ptr = SCCalloc(n, size); - if (unlikely(ptr == NULL)) + if (unlikely(ptr == NULL)) { + sc_errno = SC_ENOMEM; return NULL; + } FTPIncrMemuse((uint64_t)(n * size)); return ptr; @@ -208,12 +212,16 @@ static void *FTPRealloc(void *ptr, size_t orig_size, size_t size) { void *rptr = NULL; - if (FTPCheckMemcap((uint32_t)(size - orig_size)) == 0) + if (FTPCheckMemcap((uint32_t)(size - orig_size)) == 0) { + sc_errno = SC_ELIMIT; return NULL; + } rptr = SCRealloc(ptr, size); - if (rptr == NULL) + if (rptr == NULL) { + sc_errno = SC_ENOMEM; return NULL; + } if (size > orig_size) { FTPIncrMemuse(size - orig_size); @@ -997,7 +1005,7 @@ static AppLayerResult FTPDataParse(Flow *f, FtpDataState *ftpdata_state, ? 
AppLayerParserStateIssetFlag(pstate, APP_LAYER_PARSER_EOF_TS) != 0 : AppLayerParserStateIssetFlag(pstate, APP_LAYER_PARSER_EOF_TC) != 0; - ftpdata_state->tx_data.file_flags |= ftpdata_state->state_data.file_flags; + SCTxDataUpdateFileFlags(&ftpdata_state->tx_data, ftpdata_state->state_data.file_flags); if (ftpdata_state->tx_data.file_tx == 0) ftpdata_state->tx_data.file_tx = direction & (STREAM_TOSERVER | STREAM_TOCLIENT); diff --git a/src/app-layer-htp-file.c b/src/app-layer-htp-file.c index f96b37016061..7b3ba62edcee 100644 --- a/src/app-layer-htp-file.c +++ b/src/app-layer-htp-file.c @@ -179,7 +179,8 @@ int HTPFileOpenWithRange(HtpState *s, HtpTxUserData *txud, const uint8_t *filena } // Then, we will try to handle reassembly of different ranges of the same file - htp_tx_t *tx = htp_list_get(s->conn->transactions, txid); + // TODO have the caller pass directly the tx + htp_tx_t *tx = htp_list_get(s->conn->transactions, txid - s->tx_freed); if (!tx) { SCReturnInt(-1); } diff --git a/src/app-layer-htp-mem.c b/src/app-layer-htp-mem.c index bd9b79f67623..57967b1d6e4e 100644 --- a/src/app-layer-htp-mem.c +++ b/src/app-layer-htp-mem.c @@ -136,13 +136,17 @@ void *HTPMalloc(size_t size) { void *ptr = NULL; - if (HTPCheckMemcap((uint32_t)size) == 0) + if (HTPCheckMemcap((uint32_t)size) == 0) { + sc_errno = SC_ELIMIT; return NULL; + } ptr = SCMalloc(size); - if (unlikely(ptr == NULL)) + if (unlikely(ptr == NULL)) { + sc_errno = SC_ENOMEM; return NULL; + } HTPIncrMemuse((uint64_t)size); @@ -153,13 +157,17 @@ void *HTPCalloc(size_t n, size_t size) { void *ptr = NULL; - if (HTPCheckMemcap((uint32_t)(n * size)) == 0) + if (HTPCheckMemcap((uint32_t)(n * size)) == 0) { + sc_errno = SC_ELIMIT; return NULL; + } ptr = SCCalloc(n, size); - if (unlikely(ptr == NULL)) + if (unlikely(ptr == NULL)) { + sc_errno = SC_ENOMEM; return NULL; + } HTPIncrMemuse((uint64_t)(n * size)); @@ -169,13 +177,17 @@ void *HTPCalloc(size_t n, size_t size) void *HTPRealloc(void *ptr, size_t orig_size, 
size_t size) { if (size > orig_size) { - if (HTPCheckMemcap((uint32_t)(size - orig_size)) == 0) + if (HTPCheckMemcap((uint32_t)(size - orig_size)) == 0) { + sc_errno = SC_ELIMIT; return NULL; + } } void *rptr = SCRealloc(ptr, size); - if (rptr == NULL) + if (rptr == NULL) { + sc_errno = SC_ENOMEM; return NULL; + } if (size > orig_size) { HTPIncrMemuse((uint64_t)(size - orig_size)); diff --git a/src/app-layer-htp-range.c b/src/app-layer-htp-range.c index 3cdde35ba288..f0d75a9750c0 100644 --- a/src/app-layer-htp-range.c +++ b/src/app-layer-htp-range.c @@ -351,8 +351,10 @@ static HttpRangeContainerBlock *HttpRangeOpenFile(HttpRangeContainerFile *c, uin { HttpRangeContainerBlock *r = HttpRangeOpenFileAux(c, start, end, total, sbcfg, name, name_len, flags); - if (HttpRangeAppendData(sbcfg, r, data, len) < 0) { - SCLogDebug("Failed to append data while opening"); + if (r) { + if (HttpRangeAppendData(sbcfg, r, data, len) < 0) { + SCLogDebug("Failed to append data while opening"); + } } return r; } diff --git a/src/app-layer-htp.c b/src/app-layer-htp.c index b576ba3b7b97..7351797046d6 100644 --- a/src/app-layer-htp.c +++ b/src/app-layer-htp.c @@ -401,7 +401,7 @@ void HTPStateFree(void *state) uint64_t total_txs = HTPStateGetTxCnt(state); /* free the list of body chunks */ if (s->conn != NULL) { - for (tx_id = 0; tx_id < total_txs; tx_id++) { + for (tx_id = s->tx_freed; tx_id < total_txs; tx_id++) { htp_tx_t *tx = HTPStateGetTx(s, tx_id); if (tx != NULL) { HtpTxUserData *htud = (HtpTxUserData *) htp_tx_get_user_data(tx); @@ -458,8 +458,10 @@ static void HTPStateTransactionFree(void *state, uint64_t id) tx->request_progress = HTP_REQUEST_COMPLETE; tx->response_progress = HTP_RESPONSE_COMPLETE; } + // replaces tx in the s->conn->transactions list by NULL htp_tx_destroy(tx); } + s->tx_freed += htp_connp_tx_freed(s->connp); } /** @@ -1870,7 +1872,7 @@ static int HTPCallbackRequestBodyData(htp_tx_data_t *d) if (tx_ud == NULL) { SCReturnInt(HTP_OK); } - tx_ud->tx_data.file_flags 
|= hstate->state_data.file_flags; + SCTxDataUpdateFileFlags(&tx_ud->tx_data, hstate->state_data.file_flags); if (!tx_ud->response_body_init) { tx_ud->response_body_init = 1; @@ -2000,7 +2002,7 @@ static int HTPCallbackResponseBodyData(htp_tx_data_t *d) if (tx_ud == NULL) { SCReturnInt(HTP_OK); } - tx_ud->tx_data.file_flags |= hstate->state_data.file_flags; + SCTxDataUpdateFileFlags(&tx_ud->tx_data, hstate->state_data.file_flags); if (!tx_ud->request_body_init) { tx_ud->request_body_init = 1; } @@ -2515,6 +2517,10 @@ static void HTPConfigSetDefaultsPhase1(HTPCfgRec *cfg_prec) #endif #ifdef HAVE_HTP_CONFIG_SET_COMPRESSION_TIME_LIMIT htp_config_set_compression_time_limit(cfg_prec->cfg, HTP_CONFIG_DEFAULT_COMPRESSION_TIME_LIMIT); +#endif +#ifdef HAVE_HTP_CONFIG_SET_MAX_TX +#define HTP_CONFIG_DEFAULT_MAX_TX_LIMIT 512 + htp_config_set_max_tx(cfg_prec->cfg, HTP_CONFIG_DEFAULT_MAX_TX_LIMIT); #endif /* libhtp <= 0.5.9 doesn't use soft limit, but it's impossible to set * only the hard limit. So we set both here to the (current) htp defaults. 
@@ -2866,6 +2872,18 @@ static void HTPConfigParseParameters(HTPCfgRec *cfg_prec, ConfNode *s, } SCLogConfig("Setting HTTP decompression time limit to %" PRIu32 " usec", limit); htp_config_set_compression_time_limit(cfg_prec->cfg, (size_t)limit); +#endif +#ifdef HAVE_HTP_CONFIG_SET_MAX_TX + } else if (strcasecmp("max-tx", p->name) == 0) { + uint32_t limit = 0; + if (ParseSizeStringU32(p->val, &limit) < 0) { + FatalError("failed to parse 'max-tx' " + "from conf file - %s.", + p->val); + } + /* set default soft-limit with our new hard limit */ + SCLogConfig("Setting HTTP max-tx limit to %" PRIu32 " bytes", limit); + htp_config_set_max_tx(cfg_prec->cfg, (size_t)limit); #endif } else if (strcasecmp("randomize-inspection-sizes", p->name) == 0) { if (!g_disable_randomness) { @@ -2905,8 +2923,6 @@ static void HTPConfigParseParameters(HTPCfgRec *cfg_prec, ConfNode *s, if (strcasecmp("enabled", pval->name) == 0) { if (ConfValIsTrue(pval->val)) { cfg_prec->swf_decompression_enabled = 1; - SCLogWarning("Flash decompression is deprecated and will be removed in " - "Suricata 8; see ticket #6179"); } else if (ConfValIsFalse(pval->val)) { cfg_prec->swf_decompression_enabled = 0; } else { @@ -3079,7 +3095,7 @@ static uint64_t HTPStateGetTxCnt(void *alstate) if (size < 0) return 0ULL; SCLogDebug("size %"PRIu64, size); - return (uint64_t)size; + return (uint64_t)size + http_state->tx_freed; } else { return 0ULL; } @@ -3089,8 +3105,8 @@ static void *HTPStateGetTx(void *alstate, uint64_t tx_id) { HtpState *http_state = (HtpState *)alstate; - if (http_state != NULL && http_state->conn != NULL) - return htp_list_get(http_state->conn->transactions, tx_id); + if (http_state != NULL && http_state->conn != NULL && tx_id >= http_state->tx_freed) + return htp_list_get(http_state->conn->transactions, tx_id - http_state->tx_freed); else return NULL; } @@ -3100,9 +3116,9 @@ void *HtpGetTxForH2(void *alstate) // gets last transaction HtpState *http_state = (HtpState *)alstate; if (http_state != 
NULL && http_state->conn != NULL) { - size_t txid = htp_list_array_size(http_state->conn->transactions); - if (txid > 0) { - return htp_list_get(http_state->conn->transactions, txid - 1); + size_t txid = HTPStateGetTxCnt(http_state); + if (txid > http_state->tx_freed) { + return htp_list_get(http_state->conn->transactions, txid - http_state->tx_freed - 1); } } return NULL; diff --git a/src/app-layer-htp.h b/src/app-layer-htp.h index c8c3a7f7b987..f200ea1d39e7 100644 --- a/src/app-layer-htp.h +++ b/src/app-layer-htp.h @@ -247,6 +247,13 @@ typedef struct HtpState_ { htp_conn_t *conn; Flow *f; /**< Needed to retrieve the original flow when using HTPLib callbacks */ uint64_t transaction_cnt; + // tx_freed is the number of already freed transactions + // This is needed as libhtp only keeps the live transactions : + // To get the total number of transactions, we need to add + // the number of transactions tracked by libhtp to this number. + // It is also needed as an offset to translate between suricata + // transaction id to libhtp offset in its list/array + uint64_t tx_freed; const struct HTPCfgRec_ *cfg; uint16_t flags; uint16_t events; diff --git a/src/app-layer-parser.c b/src/app-layer-parser.c index 7783c076b65b..e9b84ed6d37c 100644 --- a/src/app-layer-parser.c +++ b/src/app-layer-parser.c @@ -1444,7 +1444,6 @@ int AppLayerParserParse(ThreadVars *tv, AppLayerParserThreadCtx *alp_tctx, Flow /* set the packets to no inspection and reassembly if required */ if (pstate->flags & APP_LAYER_PARSER_NO_INSPECTION) { AppLayerParserSetEOF(pstate); - FlowSetNoPayloadInspectionFlag(f); if (f->proto == IPPROTO_TCP) { StreamTcpDisableAppLayer(f); @@ -1466,6 +1465,9 @@ int AppLayerParserParse(ThreadVars *tv, AppLayerParserThreadCtx *alp_tctx, Flow StreamTcpSetSessionBypassFlag(ssn); } } + } else { + // for TCP, this is set after flushing + FlowSetNoPayloadInspectionFlag(f); } } diff --git a/src/app-layer-smtp.c b/src/app-layer-smtp.c index bf93c4517877..5a08c02ce46a 100644 --- 
a/src/app-layer-smtp.c +++ b/src/app-layer-smtp.c @@ -112,6 +112,8 @@ #define SMTP_EHLO_EXTENSION_STARTTLS #define SMTP_EHLO_EXTENSION_8BITMIME +#define SMTP_DEFAULT_MAX_TX 256 + typedef struct SMTPInput_ { /* current input that is being parsed */ const uint8_t *buf; @@ -421,6 +423,18 @@ static void SMTPConfigure(void) { smtp_config.raw_extraction = 0; } + uint64_t value = SMTP_DEFAULT_MAX_TX; + smtp_config.max_tx = SMTP_DEFAULT_MAX_TX; + const char *str = NULL; + if (ConfGet("app-layer.protocols.smtp.max-tx", &str) == 1) { + if (ParseSizeStringU64(str, &value) < 0) { + SCLogWarning("max-tx value cannot be deduced: %s," + " keeping default", + str); + } + smtp_config.max_tx = value; + } + SCReturn; } @@ -436,8 +450,11 @@ static void SMTPSetEvent(SMTPState *s, uint8_t e) SCLogDebug("couldn't set event %u", e); } -static SMTPTransaction *SMTPTransactionCreate(void) +static SMTPTransaction *SMTPTransactionCreate(SMTPState *state) { + if (state->tx_cnt > smtp_config.max_tx) { + return NULL; + } SMTPTransaction *tx = SCCalloc(1, sizeof(*tx)); if (tx == NULL) { return NULL; @@ -802,7 +819,7 @@ static inline void SMTPTransactionComplete(SMTPState *state) { DEBUG_VALIDATE_BUG_ON(state->curr_tx == NULL); if (state->curr_tx) - state->curr_tx->done = 1; + state->curr_tx->done = true; } /** @@ -815,6 +832,7 @@ static int SMTPProcessCommandDATA(SMTPState *state, SMTPTransaction *tx, Flow *f SCEnter(); DEBUG_VALIDATE_BUG_ON(tx == NULL); + SCTxDataUpdateFileFlags(&tx->tx_data, state->state_data.file_flags); if (!(state->parser_state & SMTP_PARSER_STATE_COMMAND_DATA_MODE)) { /* looks like are still waiting for a confirmation from the server */ return 0; @@ -976,6 +994,10 @@ static int SMTPProcessReply(SMTPState *state, Flow *f, AppLayerParserState *psta state->parser_state |= SMTP_PARSER_STATE_COMMAND_DATA_MODE; } else { /* decoder event */ + if (state->parser_state & SMTP_PARSER_STATE_PIPELINING_SERVER) { + // reset data mode if we had entered it prematurely + state->parser_state 
&= ~SMTP_PARSER_STATE_COMMAND_DATA_MODE; + } SMTPSetEvent(state, SMTP_DECODER_EVENT_DATA_COMMAND_REJECTED); } } else if (IsReplyToCommand(state, SMTP_COMMAND_RSET)) { @@ -1170,7 +1192,7 @@ static int SMTPProcessRequest(SMTPState *state, Flow *f, AppLayerParserState *ps return 0; } if (state->curr_tx == NULL || (state->curr_tx->done && !NoNewTx(state, line))) { - tx = SMTPTransactionCreate(); + tx = SMTPTransactionCreate(state); if (tx == NULL) return -1; state->curr_tx = tx; @@ -1198,36 +1220,19 @@ static int SMTPProcessRequest(SMTPState *state, Flow *f, AppLayerParserState *ps state->current_command = SMTP_COMMAND_STARTTLS; } else if (line->len >= 4 && SCMemcmpLowercase("data", line->buf, 4) == 0) { state->current_command = SMTP_COMMAND_DATA; - if (smtp_config.raw_extraction) { - if (state->tx_cnt > 1 && !state->curr_tx->done) { - // we did not close the previous tx, set error - SMTPSetEvent(state, SMTP_DECODER_EVENT_UNPARSABLE_CONTENT); - FileCloseFile(&tx->files_ts, &smtp_config.sbcfg, NULL, 0, FILE_TRUNCATED); - tx = SMTPTransactionCreate(); - if (tx == NULL) - return -1; - state->curr_tx = tx; - TAILQ_INSERT_TAIL(&state->tx_list, tx, next); - tx->tx_id = state->tx_cnt++; - } + if (state->curr_tx->is_data) { + // We did not receive a confirmation from server + // And now client sends a next DATA + SMTPSetEvent(state, SMTP_DECODER_EVENT_UNPARSABLE_CONTENT); + SCReturnInt(0); + } else if (smtp_config.raw_extraction) { if (FileOpenFileWithId(&tx->files_ts, &smtp_config.sbcfg, state->file_track_id++, (uint8_t *)rawmsgname, strlen(rawmsgname), NULL, 0, FILE_NOMD5 | FILE_NOMAGIC) == 0) { SMTPNewFile(tx, tx->files_ts.tail); } } else if (smtp_config.decode_mime) { - if (tx->mime_state) { - /* We have 2 chained mails and did not detect the end - * of first one. So we start a new transaction. 
*/ - tx->mime_state->state_flag = PARSE_ERROR; - SMTPSetEvent(state, SMTP_DECODER_EVENT_UNPARSABLE_CONTENT); - tx = SMTPTransactionCreate(); - if (tx == NULL) - return -1; - state->curr_tx = tx; - TAILQ_INSERT_TAIL(&state->tx_list, tx, next); - tx->tx_id = state->tx_cnt++; - } + DEBUG_VALIDATE_BUG_ON(tx->mime_state); tx->mime_state = MimeDecInitParser(f, SMTPProcessDataChunk); if (tx->mime_state == NULL) { return MIME_DEC_ERR_MEM; @@ -1243,6 +1248,7 @@ static int SMTPProcessRequest(SMTPState *state, Flow *f, AppLayerParserState *ps tx->msg_tail = tx->mime_state->msg; } } + state->curr_tx->is_data = true; /* Enter immediately data mode without waiting for server reply */ if (state->parser_state & SMTP_PARSER_STATE_PIPELINING_SERVER) { state->parser_state |= SMTP_PARSER_STATE_COMMAND_DATA_MODE; @@ -1935,6 +1941,8 @@ static void SMTPTestInitConfig(void) smtp_config.content_inspect_window = FILEDATA_CONTENT_INSPECT_WINDOW; smtp_config.content_inspect_min_size = FILEDATA_CONTENT_INSPECT_MIN_SIZE; + smtp_config.max_tx = SMTP_DEFAULT_MAX_TX; + smtp_config.sbcfg.buf_size = FILEDATA_CONTENT_INSPECT_WINDOW; } diff --git a/src/app-layer-smtp.h b/src/app-layer-smtp.h index 9fc1d506bbbb..33b81d026a49 100644 --- a/src/app-layer-smtp.h +++ b/src/app-layer-smtp.h @@ -75,7 +75,12 @@ typedef struct SMTPTransaction_ { AppLayerTxData tx_data; - int done; + /** the tx is complete and can be logged and cleaned */ + bool done; + /** the tx has seen a DATA command */ + // another DATA command within the same context + // will trigger an app-layer event. 
+ bool is_data; /** the first message contained in the session */ MimeDecEntity *msg_head; /** the last message contained in the session */ @@ -101,6 +106,7 @@ typedef struct SMTPConfig { uint32_t content_limit; uint32_t content_inspect_min_size; uint32_t content_inspect_window; + uint64_t max_tx; bool raw_extraction; diff --git a/src/app-layer-ssl.c b/src/app-layer-ssl.c index 302225f1903d..e5c1ed1eb0fb 100644 --- a/src/app-layer-ssl.c +++ b/src/app-layer-ssl.c @@ -43,6 +43,8 @@ #include "decode-events.h" #include "conf.h" +#include "feature.h" + #include "util-spm.h" #include "util-unittest.h" #include "util-debug.h" @@ -143,8 +145,9 @@ enum { ERR_EXTRACT_VALIDITY, }; -/* JA3 fingerprints are disabled by default */ +/* JA3 and JA4 fingerprints are disabled by default */ #define SSL_CONFIG_DEFAULT_JA3 0 +#define SSL_CONFIG_DEFAULT_JA4 0 enum SslConfigEncryptHandling { SSL_CNF_ENC_HANDLE_DEFAULT = 0, /**< disable raw content, continue tracking */ @@ -154,10 +157,12 @@ enum SslConfigEncryptHandling { typedef struct SslConfig_ { enum SslConfigEncryptHandling encrypt_mode; - /** dynamic setting for ja3: can be enabled on demand if not explicitly - * disabled. */ + /** dynamic setting for ja3 and ja4: can be enabled on demand if not + * explicitly disabled. */ SC_ATOMIC_DECLARE(int, enable_ja3); bool disable_ja3; /**< ja3 explicitly disabled. Don't enable on demand. */ + SC_ATOMIC_DECLARE(int, enable_ja4); + bool disable_ja4; /**< ja4 explicitly disabled. Don't enable on demand. 
*/ } SslConfig; SslConfig ssl_config; @@ -691,6 +696,11 @@ static inline int TLSDecodeHSHelloVersion(SSLState *ssl_state, uint16_t version = (uint16_t)(*input << 8) | *(input + 1); ssl_state->curr_connp->version = version; + if (ssl_state->curr_connp->ja4 != NULL && + ssl_state->current_flags & SSL_AL_FLAG_STATE_CLIENT_HELLO) { + SCJA4SetTLSVersion(ssl_state->curr_connp->ja4, version); + } + /* TLSv1.3 draft1 to draft21 use the version field as earlier TLS versions, instead of using the supported versions extension. */ if ((ssl_state->current_flags & SSL_AL_FLAG_STATE_SERVER_HELLO) && @@ -834,17 +844,25 @@ static inline int TLSDecodeHSHelloCipherSuites(SSLState *ssl_state, goto invalid_length; } - if (SC_ATOMIC_GET(ssl_config.enable_ja3)) { - JA3Buffer *ja3_cipher_suites = Ja3BufferInit(); - if (ja3_cipher_suites == NULL) - return -1; + const bool enable_ja3 = SC_ATOMIC_GET(ssl_config.enable_ja3); + + if (enable_ja3 || SC_ATOMIC_GET(ssl_config.enable_ja4)) { + JA3Buffer *ja3_cipher_suites = NULL; + + if (enable_ja3) { + ja3_cipher_suites = Ja3BufferInit(); + if (ja3_cipher_suites == NULL) + return -1; + } uint16_t processed_len = 0; /* coverity[tainted_data] */ while (processed_len < cipher_suites_length) { if (!(HAS_SPACE(2))) { - Ja3BufferFree(&ja3_cipher_suites); + if (enable_ja3) { + Ja3BufferFree(&ja3_cipher_suites); + } goto invalid_length; } @@ -852,19 +870,25 @@ static inline int TLSDecodeHSHelloCipherSuites(SSLState *ssl_state, input += 2; if (TLSDecodeValueIsGREASE(cipher_suite) != 1) { - int rc = Ja3BufferAddValue(&ja3_cipher_suites, cipher_suite); - if (rc != 0) { - return -1; + if (ssl_state->curr_connp->ja4 != NULL && + ssl_state->current_flags & SSL_AL_FLAG_STATE_CLIENT_HELLO) { + SCJA4AddCipher(ssl_state->curr_connp->ja4, cipher_suite); + } + if (enable_ja3) { + int rc = Ja3BufferAddValue(&ja3_cipher_suites, cipher_suite); + if (rc != 0) { + return -1; + } } } - processed_len += 2; } - int rc = Ja3BufferAppendBuffer(&ssl_state->curr_connp->ja3_str, 
- &ja3_cipher_suites); - if (rc == -1) { - return -1; + if (enable_ja3) { + int rc = Ja3BufferAppendBuffer(&ssl_state->curr_connp->ja3_str, &ja3_cipher_suites); + if (rc == -1) { + return -1; + } } } else { @@ -1025,6 +1049,10 @@ static inline int TLSDecodeHSHelloExtensionSupportedVersions(SSLState *ssl_state uint16_t ver = (uint16_t)(input[i] << 8) | input[i + 1]; if (TLSVersionValid(ver)) { ssl_state->curr_connp->version = ver; + if (ssl_state->curr_connp->ja4 != NULL && + ssl_state->current_flags & SSL_AL_FLAG_STATE_CLIENT_HELLO) { + SCJA4SetTLSVersion(ssl_state->curr_connp->ja4, ver); + } break; } i += 2; @@ -1171,6 +1199,113 @@ static inline int TLSDecodeHSHelloExtensionEllipticCurvePF(SSLState *ssl_state, return -1; } +static inline int TLSDecodeHSHelloExtensionSigAlgorithms( + SSLState *ssl_state, const uint8_t *const initial_input, const uint32_t input_len) +{ + const uint8_t *input = initial_input; + + /* Empty extension */ + if (input_len == 0) + return 0; + + if (!(HAS_SPACE(2))) + goto invalid_length; + + uint16_t sigalgo_len = (uint16_t)(*input << 8) | *(input + 1); + input += 2; + + /* Signature algorithms length should always be divisible by 2 */ + if ((sigalgo_len % 2) != 0) { + goto invalid_length; + } + + if (!(HAS_SPACE(sigalgo_len))) + goto invalid_length; + + if (ssl_state->curr_connp->ja4 != NULL && + ssl_state->current_flags & SSL_AL_FLAG_STATE_CLIENT_HELLO) { + uint16_t sigalgo_processed_len = 0; + while (sigalgo_processed_len < sigalgo_len) { + uint16_t sigalgo = (uint16_t)(*input << 8) | *(input + 1); + input += 2; + sigalgo_processed_len += 2; + + SCJA4AddSigAlgo(ssl_state->curr_connp->ja4, sigalgo); + } + } else { + /* Skip signature algorithms */ + input += sigalgo_len; + } + + return (input - initial_input); + +invalid_length: + SCLogDebug("Signature algorithm list invalid length"); + SSLSetEvent(ssl_state, TLS_DECODER_EVENT_HANDSHAKE_INVALID_LENGTH); + + return -1; +} + +static inline int TLSDecodeHSHelloExtensionALPN( + SSLState 
*ssl_state, const uint8_t *const initial_input, const uint32_t input_len) +{ + const uint8_t *input = initial_input; + + /* Empty extension */ + if (input_len == 0) + return 0; + + if (!(HAS_SPACE(2))) + goto invalid_length; + + uint16_t alpn_len = (uint16_t)(*input << 8) | *(input + 1); + input += 2; + + if (!(HAS_SPACE(alpn_len))) + goto invalid_length; + + if (ssl_state->curr_connp->ja4 != NULL && + ssl_state->current_flags & SSL_AL_FLAG_STATE_CLIENT_HELLO) { + /* We use 32 bits here to avoid potentially overflowing a value that + needs to be compared to an unsigned 16-bit value. */ + uint32_t alpn_processed_len = 0; + while (alpn_processed_len < alpn_len) { + uint8_t protolen = *input; + input += 1; + alpn_processed_len += 1; + + if (!(HAS_SPACE(protolen))) + goto invalid_length; + + /* Check if reading another protolen bytes would exceed the + overall ALPN length; if so, skip and continue */ + if (alpn_processed_len + protolen > ((uint32_t)alpn_len)) { + input += alpn_len - alpn_processed_len; + break; + } + + /* Only record the first value for JA4 */ + if (alpn_processed_len == 1) { + SCJA4SetALPN(ssl_state->curr_connp->ja4, (const char *)input, protolen); + } + + alpn_processed_len += protolen; + input += protolen; + } + } else { + /* Skip ALPN protocols */ + input += alpn_len; + } + + return (input - initial_input); + +invalid_length: + SCLogDebug("ALPN list invalid length"); + SSLSetEvent(ssl_state, TLS_DECODER_EVENT_HANDSHAKE_INVALID_LENGTH); + + return -1; +} + static inline int TLSDecodeHSHelloExtensions(SSLState *ssl_state, const uint8_t * const initial_input, const uint32_t input_len) @@ -1272,6 +1407,28 @@ static inline int TLSDecodeHSHelloExtensions(SSLState *ssl_state, break; } + case SSL_EXTENSION_SIGNATURE_ALGORITHMS: { + /* coverity[tainted_data] */ + ret = TLSDecodeHSHelloExtensionSigAlgorithms(ssl_state, input, ext_len); + if (ret < 0) + goto end; + + input += ret; + + break; + } + + case SSL_EXTENSION_ALPN: { + /* coverity[tainted_data] */ + 
ret = TLSDecodeHSHelloExtensionALPN(ssl_state, input, ext_len); + if (ret < 0) + goto end; + + input += ext_len; + + break; + } + case SSL_EXTENSION_EARLY_DATA: { if (ssl_state->current_flags & SSL_AL_FLAG_STATE_CLIENT_HELLO) { @@ -1325,6 +1482,13 @@ static inline int TLSDecodeHSHelloExtensions(SSLState *ssl_state, } } + if (ssl_state->curr_connp->ja4 != NULL && + ssl_state->current_flags & SSL_AL_FLAG_STATE_CLIENT_HELLO) { + if (TLSDecodeValueIsGREASE(ext_type) != 1) { + SCJA4AddExtension(ssl_state->curr_connp->ja4, ext_type); + } + } + processed_len += ext_len + 4; } @@ -1373,6 +1537,15 @@ static int TLSDecodeHandshakeHello(SSLState *ssl_state, int ret; uint32_t parsed = 0; + /* Ensure that we have a JA4 state defined by now if we have JA4 enabled, + we are in a client hello and we don't have such a state yet (to avoid + leaking memory in case this function is entered more than once). */ + if (SC_ATOMIC_GET(ssl_config.enable_ja4) && + ssl_state->current_flags & SSL_AL_FLAG_STATE_CLIENT_HELLO && + ssl_state->curr_connp->ja4 == NULL) { + ssl_state->curr_connp->ja4 = SCJA4New(); + } + ret = TLSDecodeHSHelloVersion(ssl_state, input, input_len); if (ret < 0) goto end; @@ -2697,6 +2870,8 @@ static void SSLStateFree(void *p) if (ssl_state->server_connp.session_id) SCFree(ssl_state->server_connp.session_id); + if (ssl_state->client_connp.ja4) + SCJA4Free(ssl_state->client_connp.ja4); if (ssl_state->client_connp.ja3_str) Ja3BufferFree(&ssl_state->client_connp.ja3_str); if (ssl_state->client_connp.ja3_hash) @@ -2950,6 +3125,56 @@ static int SSLRegisterPatternsForProtocolDetection(void) return 0; } +#ifdef HAVE_JA3 +static void CheckJA3Enabled(void) +{ + const char *strval = NULL; + /* Check if we should generate JA3 fingerprints */ + int enable_ja3 = SSL_CONFIG_DEFAULT_JA3; + if (ConfGet("app-layer.protocols.tls.ja3-fingerprints", &strval) != 1) { + enable_ja3 = SSL_CONFIG_DEFAULT_JA3; + } else if (strcmp(strval, "auto") == 0) { + enable_ja3 = SSL_CONFIG_DEFAULT_JA3; + } 
else if (ConfValIsFalse(strval)) { + enable_ja3 = 0; + ssl_config.disable_ja3 = true; + } else if (ConfValIsTrue(strval)) { + enable_ja3 = true; + } + SC_ATOMIC_SET(ssl_config.enable_ja3, enable_ja3); + if (!ssl_config.disable_ja3 && !g_disable_hashing) { + /* The feature is available, i.e. _could_ be activated by a rule or + even is enabled in the configuration. */ + ProvidesFeature(FEATURE_JA3); + } +} +#endif /* HAVE_JA3 */ + +#ifdef HAVE_JA4 +static void CheckJA4Enabled(void) +{ + const char *strval = NULL; + /* Check if we should generate JA4 fingerprints */ + int enable_ja4 = SSL_CONFIG_DEFAULT_JA4; + if (ConfGet("app-layer.protocols.tls.ja4-fingerprints", &strval) != 1) { + enable_ja4 = SSL_CONFIG_DEFAULT_JA4; + } else if (strcmp(strval, "auto") == 0) { + enable_ja4 = SSL_CONFIG_DEFAULT_JA4; + } else if (ConfValIsFalse(strval)) { + enable_ja4 = 0; + ssl_config.disable_ja4 = true; + } else if (ConfValIsTrue(strval)) { + enable_ja4 = true; + } + SC_ATOMIC_SET(ssl_config.enable_ja4, enable_ja4); + if (!ssl_config.disable_ja4 && !g_disable_hashing) { + /* The feature is available, i.e. _could_ be activated by a rule or + even is enabled in the configuration. 
*/ + ProvidesFeature(FEATURE_JA4); + } +} +#endif /* HAVE_JA4 */ + /** * \brief Function to register the SSL protocol parser and other functions */ @@ -3049,29 +3274,30 @@ void RegisterSSLParsers(void) } SCLogDebug("ssl_config.encrypt_mode %u", ssl_config.encrypt_mode); - /* Check if we should generate JA3 fingerprints */ - int enable_ja3 = SSL_CONFIG_DEFAULT_JA3; - const char *strval = NULL; - if (ConfGet("app-layer.protocols.tls.ja3-fingerprints", &strval) != 1) { - enable_ja3 = SSL_CONFIG_DEFAULT_JA3; - } else if (strcmp(strval, "auto") == 0) { - enable_ja3 = SSL_CONFIG_DEFAULT_JA3; - } else if (ConfValIsFalse(strval)) { - enable_ja3 = 0; - ssl_config.disable_ja3 = true; - } else if (ConfValIsTrue(strval)) { - enable_ja3 = true; - } - SC_ATOMIC_SET(ssl_config.enable_ja3, enable_ja3); +#ifdef HAVE_JA3 + CheckJA3Enabled(); +#endif /* HAVE_JA3 */ +#ifdef HAVE_JA4 + CheckJA4Enabled(); +#endif /* HAVE_JA4 */ if (g_disable_hashing) { if (SC_ATOMIC_GET(ssl_config.enable_ja3)) { SCLogWarning("MD5 calculation has been disabled, disabling JA3"); SC_ATOMIC_SET(ssl_config.enable_ja3, 0); } + if (SC_ATOMIC_GET(ssl_config.enable_ja4)) { + SCLogWarning("Hashing has been disabled, disabling JA4"); + SC_ATOMIC_SET(ssl_config.enable_ja4, 0); + } } else { if (RunmodeIsUnittests()) { +#ifdef HAVE_JA3 SC_ATOMIC_SET(ssl_config.enable_ja3, 1); +#endif /* HAVE_JA3 */ +#ifdef HAVE_JA4 + SC_ATOMIC_SET(ssl_config.enable_ja4, 1); +#endif /* HAVE_JA4 */ } } } else { @@ -3099,10 +3325,45 @@ void SSLEnableJA3(void) SC_ATOMIC_SET(ssl_config.enable_ja3, 1); } -bool SSLJA3IsEnabled(void) +/** + * \brief if not explicitly disabled in config, enable ja4 support + * + * Implemented using atomic to allow rule reloads to do this at + * runtime. 
+ */ +void SSLEnableJA4(void) { - if (SC_ATOMIC_GET(ssl_config.enable_ja3)) { - return true; + if (g_disable_hashing || ssl_config.disable_ja4) { + return; } - return false; + if (SC_ATOMIC_GET(ssl_config.enable_ja4)) { + return; + } + SC_ATOMIC_SET(ssl_config.enable_ja4, 1); +} + +/** + * \brief return whether ja3 is effectively enabled + * + * This means that it either has been enabled explicitly or has been + * enabled by having loaded a rule while not being explicitly disabled. + * + * \retval true if enabled, false otherwise + */ +bool SSLJA3IsEnabled(void) +{ + return SC_ATOMIC_GET(ssl_config.enable_ja3); +} + +/** + * \brief return whether ja4 is effectively enabled + * + * This means that it either has been enabled explicitly or has been + * enabled by having loaded a rule while not being explicitly disabled. + * + * \retval true if enabled, false otherwise + */ +bool SSLJA4IsEnabled(void) +{ + return SC_ATOMIC_GET(ssl_config.enable_ja4); } diff --git a/src/app-layer-ssl.h b/src/app-layer-ssl.h index f2e42622308e..09d975ae55d7 100644 --- a/src/app-layer-ssl.h +++ b/src/app-layer-ssl.h @@ -141,6 +141,8 @@ enum { #define SSL_EXTENSION_SNI 0x0000 #define SSL_EXTENSION_ELLIPTIC_CURVES 0x000a #define SSL_EXTENSION_EC_POINT_FORMATS 0x000b +#define SSL_EXTENSION_SIGNATURE_ALGORITHMS 0x000d +#define SSL_EXTENSION_ALPN 0x0010 #define SSL_EXTENSION_SESSION_TICKET 0x0023 #define SSL_EXTENSION_EARLY_DATA 0x002a #define SSL_EXTENSION_SUPPORTED_VERSIONS 0x002b @@ -267,6 +269,8 @@ typedef struct SSLStateConnp_ { JA3Buffer *ja3_str; char *ja3_hash; + JA4 *ja4; + /* handshake tls fragmentation buffer. Handshake messages can be fragmented over multiple * TLS records. 
*/ uint8_t *hs_buffer; @@ -307,5 +311,7 @@ void RegisterSSLParsers(void); void SSLVersionToString(uint16_t, char *); void SSLEnableJA3(void); bool SSLJA3IsEnabled(void); +void SSLEnableJA4(void); +bool SSLJA4IsEnabled(void); #endif /* __APP_LAYER_SSL_H__ */ diff --git a/src/app-layer.c b/src/app-layer.c index b031afce8ac8..794e8e84d315 100644 --- a/src/app-layer.c +++ b/src/app-layer.c @@ -510,6 +510,20 @@ static int TCPProtoDetect(ThreadVars *tv, TcpReassemblyThreadCtx *ra_ctx, if (r != 1) { StreamTcpUpdateAppLayerProgress(ssn, direction, data_len); } + if (r == 0) { + if (*alproto_otherdir == ALPROTO_UNKNOWN) { + TcpStream *opposing_stream; + if (*stream == &ssn->client) { + opposing_stream = &ssn->server; + } else { + opposing_stream = &ssn->client; + } + if (StreamTcpIsSetStreamFlagAppProtoDetectionCompleted(opposing_stream)) { + // can happen in detection-only + AppLayerIncFlowCounter(tv, f); + } + } + } if (r < 0) { goto parser_error; } diff --git a/src/conf-yaml-loader.c b/src/conf-yaml-loader.c index 1bd107e0c1c9..463eb2e5823c 100644 --- a/src/conf-yaml-loader.c +++ b/src/conf-yaml-loader.c @@ -185,7 +185,7 @@ static int ConfYamlParse(yaml_parser_t *parser, ConfNode *parent, int inseq, int while (!done) { if (!yaml_parser_parse(parser, &event)) { - SCLogError("Failed to parse configuration file at line %" PRIuMAX ": %s\n", + SCLogError("Failed to parse configuration file at line %" PRIuMAX ": %s", (uintmax_t)parser->problem_mark.line, parser->problem); retval = -1; break; @@ -394,8 +394,19 @@ static int ConfYamlParse(yaml_parser_t *parser, ConfNode *parent, int inseq, int if (inseq) { char sequence_node_name[DEFAULT_NAME_LEN]; snprintf(sequence_node_name, DEFAULT_NAME_LEN, "%d", seq_idx++); - ConfNode *seq_node = ConfNodeLookupChild(node, - sequence_node_name); + ConfNode *seq_node = NULL; + if (was_empty < 0) { + // initialize was_empty + if (TAILQ_EMPTY(&node->head)) { + was_empty = 1; + } else { + was_empty = 0; + } + } + // we only check if the node's 
list was not empty at first + if (was_empty == 0) { + seq_node = ConfNodeLookupChild(node, sequence_node_name); + } if (seq_node != NULL) { /* The sequence node has already been set, probably * from the command line. Remove it so it gets diff --git a/src/datasets.c b/src/datasets.c index d89ed8df59da..01ef5bb47c90 100644 --- a/src/datasets.c +++ b/src/datasets.c @@ -746,6 +746,11 @@ Dataset *DatasetGet(const char *name, enum DatasetTypes type, const char *save, break; } + if (set->hash && SC_ATOMIC_GET(set->hash->memcap_reached)) { + SCLogError("dataset too large for set memcap"); + goto out_err; + } + SCLogDebug("set %p/%s type %u save %s load %s", set, set->name, set->type, set->save, set->load); diff --git a/src/decode-ipv4.c b/src/decode-ipv4.c index 92d0c6ecfd5c..c1bb9cce4731 100644 --- a/src/decode-ipv4.c +++ b/src/decode-ipv4.c @@ -601,7 +601,7 @@ int DecodeIPV4(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p, } case IPPROTO_IP: /* check PPP VJ uncompressed packets and decode tcp dummy */ - if(p->ppph != NULL && SCNtohs(p->ppph->protocol) == PPP_VJ_UCOMP) { + if (p->flags & PKT_PPP_VJ_UCOMP) { DecodeTCP(tv, dtv, p, pkt + IPV4_GET_HLEN(p), IPV4_GET_IPLEN(p) - IPV4_GET_HLEN(p)); } diff --git a/src/decode-ipv4.h b/src/decode-ipv4.h index d247fa9f0033..a825007c20bc 100644 --- a/src/decode-ipv4.h +++ b/src/decode-ipv4.h @@ -154,20 +154,18 @@ typedef struct IPV4Hdr_ memset(&p->ip4vars, 0x00, sizeof(p->ip4vars)); \ } while (0) -enum IPV4OptionFlags { - IPV4_OPT_FLAG_EOL = 0, - IPV4_OPT_FLAG_NOP, - IPV4_OPT_FLAG_RR, - IPV4_OPT_FLAG_TS, - IPV4_OPT_FLAG_QS, - IPV4_OPT_FLAG_LSRR, - IPV4_OPT_FLAG_SSRR, - IPV4_OPT_FLAG_SID, - IPV4_OPT_FLAG_SEC, - IPV4_OPT_FLAG_CIPSO, - IPV4_OPT_FLAG_RTRALT, - IPV4_OPT_FLAG_ESEC, -}; +#define IPV4_OPT_FLAG_EOL BIT_U16(1) +#define IPV4_OPT_FLAG_NOP BIT_U16(2) +#define IPV4_OPT_FLAG_RR BIT_U16(3) +#define IPV4_OPT_FLAG_TS BIT_U16(4) +#define IPV4_OPT_FLAG_QS BIT_U16(5) +#define IPV4_OPT_FLAG_LSRR BIT_U16(6) +#define IPV4_OPT_FLAG_SSRR 
BIT_U16(7) +#define IPV4_OPT_FLAG_SID BIT_U16(8) +#define IPV4_OPT_FLAG_SEC BIT_U16(9) +#define IPV4_OPT_FLAG_CIPSO BIT_U16(10) +#define IPV4_OPT_FLAG_RTRALT BIT_U16(11) +#define IPV4_OPT_FLAG_ESEC BIT_U16(12) /* helper structure with parsed ipv4 info */ typedef struct IPV4Vars_ diff --git a/src/decode-ppp.c b/src/decode-ppp.c index 5bf682bd2fc6..72a028c98bb8 100644 --- a/src/decode-ppp.c +++ b/src/decode-ppp.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2021 Open Information Security Foundation +/* Copyright (C) 2007-2024 Open Information Security Foundation * * You can copy, redistribute or modify this Program under the terms of * the GNU General Public License version 2 as published by the Free @@ -41,70 +41,94 @@ #include "util-unittest.h" #include "util-debug.h" -int DecodePPP(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p, - const uint8_t *pkt, uint32_t len) +static int DecodePPPCompressedProto(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p, + const uint8_t *pkt, uint32_t len, uint16_t proto_offset) { - DEBUG_VALIDATE_BUG_ON(pkt == NULL); - - StatsIncr(tv, dtv->counter_ppp); + const uint32_t data_offset = proto_offset + 1; + switch (*(pkt + proto_offset)) { + case 0x21: { /* PPP_IP */ + if (unlikely(len < (data_offset + IPV4_HEADER_LEN))) { + ENGINE_SET_INVALID_EVENT(p, PPPVJU_PKT_TOO_SMALL); + return TM_ECODE_FAILED; + } + DEBUG_VALIDATE_BUG_ON(len < data_offset); + uint16_t iplen = (uint16_t)MIN((uint32_t)USHRT_MAX, len - data_offset); + return DecodeIPV4(tv, dtv, p, pkt + data_offset, iplen); + } + case 0x57: { /* PPP_IPV6 */ + if (unlikely(len < (data_offset + IPV6_HEADER_LEN))) { + ENGINE_SET_INVALID_EVENT(p, PPPIPV6_PKT_TOO_SMALL); + return TM_ECODE_FAILED; + } + DEBUG_VALIDATE_BUG_ON(len < data_offset); + uint16_t iplen = (uint16_t)MIN((uint32_t)USHRT_MAX, len - data_offset); + return DecodeIPV6(tv, dtv, p, pkt + data_offset, iplen); + } + case 0x2f: /* PPP_VJ_UCOMP */ + if (unlikely(len < (data_offset + IPV4_HEADER_LEN))) { + 
ENGINE_SET_INVALID_EVENT(p, PPPVJU_PKT_TOO_SMALL); + return TM_ECODE_FAILED; + } - if (unlikely(len < PPP_HEADER_LEN)) { - ENGINE_SET_INVALID_EVENT(p, PPP_PKT_TOO_SMALL); - return TM_ECODE_FAILED; - } - if (!PacketIncreaseCheckLayers(p)) { - return TM_ECODE_FAILED; - } + if (unlikely(len > data_offset + USHRT_MAX)) { + return TM_ECODE_FAILED; + } - p->ppph = (PPPHdr *)pkt; + if (likely(IPV4_GET_RAW_VER((IPV4Hdr *)(pkt + data_offset)) == 4)) { + p->flags |= PKT_PPP_VJ_UCOMP; + return DecodeIPV4(tv, dtv, p, pkt + data_offset, (uint16_t)(len - data_offset)); + } else + return TM_ECODE_FAILED; + break; - SCLogDebug("p %p pkt %p PPP protocol %04x Len: %" PRIu32 "", - p, pkt, SCNtohs(p->ppph->protocol), len); + default: + ENGINE_SET_EVENT(p, PPP_UNSUP_PROTO); + return TM_ECODE_OK; + } +} - switch (SCNtohs(p->ppph->protocol)) - { +static int DecodePPPUncompressedProto(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p, + const uint8_t *pkt, uint32_t len, const uint16_t proto, const uint32_t data_offset) +{ + switch (proto) { case PPP_VJ_UCOMP: - if (unlikely(len < (PPP_HEADER_LEN + IPV4_HEADER_LEN))) { - ENGINE_SET_INVALID_EVENT(p,PPPVJU_PKT_TOO_SMALL); - p->ppph = NULL; + if (unlikely(len < (data_offset + IPV4_HEADER_LEN))) { + ENGINE_SET_INVALID_EVENT(p, PPPVJU_PKT_TOO_SMALL); return TM_ECODE_FAILED; } - if (unlikely(len > PPP_HEADER_LEN + USHRT_MAX)) { + if (unlikely(len > data_offset + USHRT_MAX)) { return TM_ECODE_FAILED; } - if (likely(IPV4_GET_RAW_VER((IPV4Hdr *)(pkt + PPP_HEADER_LEN)) == 4)) { - return DecodeIPV4( - tv, dtv, p, pkt + PPP_HEADER_LEN, (uint16_t)(len - PPP_HEADER_LEN)); + if (likely(IPV4_GET_RAW_VER((IPV4Hdr *)(pkt + data_offset)) == 4)) { + return DecodeIPV4(tv, dtv, p, pkt + data_offset, (uint16_t)(len - data_offset)); } else return TM_ECODE_FAILED; break; case PPP_IP: - if (unlikely(len < (PPP_HEADER_LEN + IPV4_HEADER_LEN))) { - ENGINE_SET_INVALID_EVENT(p,PPPIPV4_PKT_TOO_SMALL); - p->ppph = NULL; + if (unlikely(len < (data_offset + 
IPV4_HEADER_LEN))) { + ENGINE_SET_INVALID_EVENT(p, PPPIPV4_PKT_TOO_SMALL); return TM_ECODE_FAILED; } - if (unlikely(len > PPP_HEADER_LEN + USHRT_MAX)) { + if (unlikely(len > data_offset + USHRT_MAX)) { return TM_ECODE_FAILED; } - return DecodeIPV4(tv, dtv, p, pkt + PPP_HEADER_LEN, (uint16_t)(len - PPP_HEADER_LEN)); + return DecodeIPV4(tv, dtv, p, pkt + data_offset, (uint16_t)(len - data_offset)); /* PPP IPv6 was not tested */ case PPP_IPV6: - if (unlikely(len < (PPP_HEADER_LEN + IPV6_HEADER_LEN))) { - ENGINE_SET_INVALID_EVENT(p,PPPIPV6_PKT_TOO_SMALL); - p->ppph = NULL; + if (unlikely(len < (data_offset + IPV6_HEADER_LEN))) { + ENGINE_SET_INVALID_EVENT(p, PPPIPV6_PKT_TOO_SMALL); return TM_ECODE_FAILED; } - if (unlikely(len > PPP_HEADER_LEN + USHRT_MAX)) { + if (unlikely(len > data_offset + USHRT_MAX)) { return TM_ECODE_FAILED; } - return DecodeIPV6(tv, dtv, p, pkt + PPP_HEADER_LEN, (uint16_t)(len - PPP_HEADER_LEN)); + return DecodeIPV6(tv, dtv, p, pkt + data_offset, (uint16_t)(len - data_offset)); case PPP_VJ_COMP: case PPP_IPX: @@ -134,15 +158,70 @@ int DecodePPP(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p, case PPP_PAP: case PPP_LQM: case PPP_CHAP: + case PPP_CCP: + case PPP_CBCP: + case PPP_COMP_DGRAM: ENGINE_SET_EVENT(p,PPP_UNSUP_PROTO); return TM_ECODE_OK; default: - SCLogDebug("unknown PPP protocol: %" PRIx32 "",SCNtohs(p->ppph->protocol)); + SCLogDebug("unknown PPP protocol: %x", proto); ENGINE_SET_INVALID_EVENT(p, PPP_WRONG_TYPE); return TM_ECODE_OK; } +} +int DecodePPP(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p, const uint8_t *pkt, uint32_t len) +{ + DEBUG_VALIDATE_BUG_ON(pkt == NULL); + + StatsIncr(tv, dtv->counter_ppp); + if (unlikely(len < 1)) { + ENGINE_SET_INVALID_EVENT(p, PPP_PKT_TOO_SMALL); + return TM_ECODE_FAILED; + } + + uint16_t proto_offset = 0; + /* 0xff means we have a HDLC header: proto will start at offset 2 */ + if (*pkt == 0xff) { + proto_offset = 2; + /* make sure the proto field at the offset fits */ + if (len < 3) { + 
ENGINE_SET_INVALID_EVENT(p, PPP_PKT_TOO_SMALL); + return TM_ECODE_FAILED; + } + } + uint8_t proto_size = 0; + uint8_t proto_byte = *(pkt + proto_offset); + /* check if compressed protocol bit is set. */ + if (proto_byte & 0x01) { + proto_size = 1; + } else { + proto_size = 2; + } + if (len < (proto_size + proto_offset)) { + ENGINE_SET_INVALID_EVENT(p, PPP_PKT_TOO_SMALL); + return TM_ECODE_FAILED; + } + if (!PacketIncreaseCheckLayers(p)) { + return TM_ECODE_FAILED; + } + + const uint32_t data_offset = proto_offset + proto_size; + if (data_offset != 4) { + if (proto_size == 1) { + return DecodePPPCompressedProto(tv, dtv, p, pkt, len, proto_offset); + } else { + const uint16_t proto = SCNtohs(*(uint16_t *)(pkt + proto_offset)); + return DecodePPPUncompressedProto(tv, dtv, p, pkt, len, proto, data_offset); + } + } + /* implied proto_offset + proto_size == 4, so continue below */ + + const PPPHdr *ppph = (PPPHdr *)pkt; + SCLogDebug( + "p %p pkt %p PPP protocol %04x Len: %" PRIu32 "", p, pkt, SCNtohs(ppph->protocol), len); + return DecodePPPUncompressedProto(tv, dtv, p, pkt, len, SCNtohs(ppph->protocol), data_offset); } /* TESTS BELOW */ @@ -238,11 +317,6 @@ static int DecodePPPtest03 (void) FlowShutdown(); - if(p->ppph == NULL) { - SCFree(p); - return 0; - } - if(ENGINE_ISSET_EVENT(p,PPP_PKT_TOO_SMALL)) { SCFree(p); return 0; @@ -296,11 +370,6 @@ static int DecodePPPtest04 (void) FlowShutdown(); - if(p->ppph == NULL) { - SCFree(p); - return 0; - } - if (!(ENGINE_ISSET_EVENT(p,IPV4_TRUNC_PKT))) { SCFree(p); return 0; diff --git a/src/decode-ppp.h b/src/decode-ppp.h index f8914cf2e77d..66894b03d710 100644 --- a/src/decode-ppp.h +++ b/src/decode-ppp.h @@ -59,6 +59,9 @@ #define PPP_PAP 0xc023 /* Password Authentication Protocol */ #define PPP_LQM 0xc025 /* Link Quality Monitoring */ #define PPP_CHAP 0xc223 /* Challenge Handshake Authentication Protocol */ +#define PPP_CCP 0x80fd /* Compression Control Protocol */ +#define PPP_CBCP 0xc029 /* Callback Control Protocol CBCP */ 
+#define PPP_COMP_DGRAM 0x00fd /* Compressed datagram */ /** PPP Packet header */ typedef struct PPPHdr_ { diff --git a/src/decode-pppoe.c b/src/decode-pppoe.c index f884085c650f..eb5e6acb2844 100644 --- a/src/decode-pppoe.c +++ b/src/decode-pppoe.c @@ -80,11 +80,6 @@ int DecodePPPOEDiscovery(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p, return TM_ECODE_OK; } - /* parse any tags we have in the packet */ - - uint32_t tag_length = 0; - PPPOEDiscoveryTag* pppoedt = (PPPOEDiscoveryTag*) (p->pppoedh + PPPOE_DISCOVERY_HEADER_MIN_LEN); - uint32_t pppoe_length = SCNtohs(p->pppoedh->pppoe_length); uint32_t packet_length = len - PPPOE_DISCOVERY_HEADER_MIN_LEN ; @@ -97,29 +92,29 @@ int DecodePPPOEDiscovery(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p, return TM_ECODE_OK; } - while (pppoedt < (PPPOEDiscoveryTag*) (pkt + (len - sizeof(PPPOEDiscoveryTag))) && pppoe_length >=4 && packet_length >=4) - { #ifdef DEBUG + /* parse any tags we have in the packet */ + + uint32_t tag_length = 0; + const uint8_t *pkt_pppoedt = pkt + PPPOE_DISCOVERY_HEADER_MIN_LEN; + + // packet_length >= pppoe_length so we have enough data + while (pppoe_length >= sizeof(PPPOEDiscoveryTag)) { + PPPOEDiscoveryTag *pppoedt = (PPPOEDiscoveryTag *)pkt_pppoedt; uint16_t tag_type = SCNtohs(pppoedt->pppoe_tag_type); -#endif + // upgrade to u32 to avoid u16 overflow tag_length = SCNtohs(pppoedt->pppoe_tag_length); SCLogDebug ("PPPoE Tag type %x, length %"PRIu32, tag_type, tag_length); if (pppoe_length >= (4 + tag_length)) { pppoe_length -= (4 + tag_length); + pkt_pppoedt = pkt_pppoedt + (4 + tag_length); } else { pppoe_length = 0; // don't want an underflow } - - if (packet_length >= 4 + tag_length) { - packet_length -= (4 + tag_length); - } else { - packet_length = 0; // don't want an underflow - } - - pppoedt = pppoedt + (4 + tag_length); } +#endif return TM_ECODE_OK; } diff --git a/src/decode.c b/src/decode.c index 5cdeeead6b96..13c65413502d 100644 --- a/src/decode.c +++ b/src/decode.c @@ -408,7 +408,6 
@@ Packet *PacketDefragPktSetup(Packet *parent, const uint8_t *pkt, uint32_t len, u } p->recursion_level = parent->recursion_level; /* NOT incremented */ p->ts = parent->ts; - p->datalink = DLT_RAW; p->tenant_id = parent->tenant_id; /* tell new packet it's part of a tunnel */ SET_TUNNEL_PKT(p); diff --git a/src/decode.h b/src/decode.h index dedfbb09efd0..da41726e806b 100644 --- a/src/decode.h +++ b/src/decode.h @@ -566,7 +566,6 @@ typedef struct Packet_ ICMPV6Hdr *icmpv6h; - PPPHdr *ppph; PPPOESessionHdr *pppoesh; PPPOEDiscoveryHdr *pppoedh; @@ -974,6 +973,7 @@ void DecodeUnregisterCounters(void); * Libpcap on at least OpenBSD returns 101 as datalink type for RAW pcaps though. */ #define LINKTYPE_RAW2 101 #define LINKTYPE_IPV4 228 +#define LINKTYPE_IPV6 229 #define LINKTYPE_GRE_OVER_IP 778 #define LINKTYPE_CISCO_HDLC DLT_C_HDLC #define PPP_OVER_GRE 11 @@ -983,7 +983,8 @@ void DecodeUnregisterCounters(void); /** Flag to indicate that packet header or contents should not be inspected */ #define PKT_NOPACKET_INSPECTION BIT_U32(0) -// vacancy +/** Packet has a PPP_VJ_UCOMP header */ +#define PKT_PPP_VJ_UCOMP BIT_U32(1) /** Flag to indicate that packet contents should not be inspected */ #define PKT_NOPAYLOAD_INSPECTION BIT_U32(2) diff --git a/src/defrag-hash.c b/src/defrag-hash.c index 2f19ce28ee13..87d40f9b5109 100644 --- a/src/defrag-hash.c +++ b/src/defrag-hash.c @@ -591,7 +591,7 @@ DefragTracker *DefragGetTrackerFromHash (Packet *p) return dt; } - if (DefragTrackerCompare(dt, p) != 0) { + if (!dt->remove && DefragTrackerCompare(dt, p) != 0) { /* we found our tracker, lets put it on top of the * hash list -- this rewards active trackers */ if (dt->hnext) { diff --git a/src/defrag.c b/src/defrag.c index 71cf4204c17a..c5979b285d1b 100644 --- a/src/defrag.c +++ b/src/defrag.c @@ -103,26 +103,6 @@ static DefragContext *defrag_context; RB_GENERATE(IP_FRAGMENTS, Frag_, rb, DefragRbFragCompare); -/** - * Utility/debugging function to dump the frags associated with a - * 
tracker. Only enable when unit tests are enabled. - */ -#if 0 -#ifdef UNITTESTS -static void -DumpFrags(DefragTracker *tracker) -{ - Frag *frag; - - printf("Dumping frags for packet: ID=%d\n", tracker->id); - TAILQ_FOREACH(frag, &tracker->frags, next) { - printf("-> Frag: frag_offset=%d, frag_len=%d, data_len=%d, ltrim=%d, skip=%d\n", frag->offset, frag->len, frag->data_len, frag->ltrim, frag->skip); - PrintRawDataFp(stdout, frag->pkt, frag->len); - } -} -#endif /* UNITTESTS */ -#endif - /** * \brief Reset a frag for reuse in a pool. */ @@ -266,7 +246,7 @@ Defrag4Reassemble(ThreadVars *tv, DefragTracker *tracker, Packet *p) } /* Check that we have all the data. Relies on the fact that - * fragments are inserted if frag_offset order. */ + * fragments are inserted in frag_offset order. */ Frag *frag = NULL; size_t len = 0; RB_FOREACH_FROM(frag, IP_FRAGMENTS, first) { @@ -276,7 +256,8 @@ Defrag4Reassemble(ThreadVars *tv, DefragTracker *tracker, Packet *p) goto done; } else { - len += frag->data_len; + /* Update the packet length to the largest known data offset. */ + len = MAX(len, frag->offset + frag->data_len); } } @@ -288,17 +269,27 @@ Defrag4Reassemble(ThreadVars *tv, DefragTracker *tracker, Packet *p) } PKT_SET_SRC(rp, PKT_SRC_DEFRAG); rp->flags |= PKT_REBUILT_FRAGMENT; - rp->recursion_level = p->recursion_level; + rp->datalink = tracker->datalink; int fragmentable_offset = 0; uint16_t fragmentable_len = 0; uint16_t hlen = 0; int ip_hdr_offset = 0; + /* Assume more frags. */ + uint16_t prev_offset = 0; + bool more_frags = 1; + RB_FOREACH(frag, IP_FRAGMENTS, &tracker->fragment_tree) { SCLogDebug("frag %p, data_len %u, offset %u, pcap_cnt %"PRIu64, frag, frag->data_len, frag->offset, frag->pcap_cnt); + /* Previous fragment has no more fragments, and this packet + * doesn't overlap. We're done. 
*/ + if (!more_frags && frag->offset > prev_offset) { + break; + } + if (frag->skip) continue; if (frag->ltrim >= frag->data_len) @@ -339,9 +330,16 @@ Defrag4Reassemble(ThreadVars *tv, DefragTracker *tracker, Packet *p) fragmentable_len = frag->offset + frag->data_len; } - if (!frag->more_frags) { - break; - } + /* Even if this fragment is flagged as having no more + * fragments, still continue. The next fragment may have the + * same offset with data that is preferred. + * + * For example, DefragBsdFragmentAfterNoMfIpv{4,6}Test + * + * This is due to not all fragments being completely trimmed, + * but relying on the copy ordering. */ + more_frags = frag->more_frags; + prev_offset = frag->offset; } SCLogDebug("ip_hdr_offset %u, hlen %" PRIu16 ", fragmentable_len %" PRIu16, ip_hdr_offset, hlen, @@ -417,7 +415,7 @@ Defrag6Reassemble(ThreadVars *tv, DefragTracker *tracker, Packet *p) goto done; } else { - len += frag->data_len; + len = MAX(len, frag->offset + frag->data_len); } } } @@ -430,13 +428,23 @@ Defrag6Reassemble(ThreadVars *tv, DefragTracker *tracker, Packet *p) goto error_remove_tracker; } PKT_SET_SRC(rp, PKT_SRC_DEFRAG); + rp->flags |= PKT_REBUILT_FRAGMENT; + rp->datalink = tracker->datalink; uint16_t unfragmentable_len = 0; int fragmentable_offset = 0; uint16_t fragmentable_len = 0; int ip_hdr_offset = 0; uint8_t next_hdr = 0; + + /* Assume more frags. */ + uint16_t prev_offset = 0; + bool more_frags = 1; + RB_FOREACH(frag, IP_FRAGMENTS, &tracker->fragment_tree) { + if (!more_frags && frag->offset > prev_offset) { + break; + } if (frag->skip) continue; if (frag->data_len - frag->ltrim <= 0) @@ -481,9 +489,16 @@ Defrag6Reassemble(ThreadVars *tv, DefragTracker *tracker, Packet *p) fragmentable_len = frag->offset + frag->data_len; } - if (!frag->more_frags) { - break; - } + /* Even if this fragment is flagged as having no more + * fragments, still continue. The next fragment may have the + * same offset with data that is preferred. 
+ * + * For example, DefragBsdFragmentAfterNoMfIpv{4,6}Test + * + * This is due to not all fragments being completely trimmed, + * but relying on the copy ordering. */ + more_frags = frag->more_frags; + prev_offset = frag->offset; } rp->ip6h = (IPV6Hdr *)(GET_PKT_DATA(rp) + ip_hdr_offset); @@ -660,16 +675,45 @@ DefragInsertFrag(ThreadVars *tv, DecodeThreadVars *dtv, DefragTracker *tracker, switch (tracker->policy) { case DEFRAG_POLICY_BSD: if (frag_offset < prev->offset + prev->data_len) { - if (frag_offset >= prev->offset) { - ltrim = prev->offset + prev->data_len - frag_offset; + if (prev->offset <= frag_offset) { + /* We prefer the data from the previous + * fragment, so trim off the data in the new + * fragment that exists in the previous + * fragment. */ + uint16_t prev_end = prev->offset + prev->data_len; + if (prev_end > frag_end) { + /* Just skip. */ + /* TODO: Set overlap flag. */ + goto done; + } + ltrim = prev_end - frag_offset; + + if ((next != NULL) && (frag_end > next->offset)) { + next->ltrim = frag_end - next->offset; + } + + goto insert; } + + /* If the end of this fragment overlaps the start + * of the previous fragment, then trim up the + * start of previous fragment so this fragment is + * used. + * + * See: + * DefragBsdSubsequentOverlapsStartOfOriginal. 
+ */ + if (frag_offset <= prev->offset && frag_end > prev->offset + prev->ltrim) { + uint16_t prev_ltrim = frag_end - prev->offset; + if (prev_ltrim > prev->ltrim) { + prev->ltrim = prev_ltrim; + } + } + if ((next != NULL) && (frag_end > next->offset)) { next->ltrim = frag_end - next->offset; } - if ((frag_offset < prev->offset) && - (frag_end >= prev->offset + prev->data_len)) { - prev->skip = 1; - } + goto insert; } break; @@ -861,6 +905,9 @@ DefragInsertFrag(ThreadVars *tv, DecodeThreadVars *dtv, DefragTracker *tracker, #ifdef DEBUG new->pcap_cnt = pcap_cnt; #endif + if (frag_offset == 0) { + tracker->datalink = p->datalink; + } IP_FRAGMENTS_RB_INSERT(&tracker->fragment_tree, new); @@ -1093,8 +1140,8 @@ void DefragDestroy(void) * Allocate a test packet. Nothing to fancy, just a simple IP packet * with some payload of no particular protocol. */ -static Packet *BuildTestPacket(uint8_t proto, uint16_t id, uint16_t off, int mf, - const char content, int content_len) +static Packet *BuildIpv4TestPacket( + uint8_t proto, uint16_t id, uint16_t off, int mf, const char content, int content_len) { Packet *p = NULL; int hlen = 20; @@ -1167,8 +1214,79 @@ static Packet *BuildTestPacket(uint8_t proto, uint16_t id, uint16_t off, int mf, return NULL; } -static Packet *IPV6BuildTestPacket(uint8_t proto, uint32_t id, uint16_t off, - int mf, const char content, int content_len) +/** + * Allocate a test packet, much like BuildIpv4TestPacket, but with + * the full content provided by the caller. 
+ */ +static Packet *BuildIpv4TestPacketWithContent( + uint8_t proto, uint16_t id, uint16_t off, int mf, const uint8_t *content, int content_len) +{ + Packet *p = NULL; + int hlen = 20; + int ttl = 64; + IPV4Hdr ip4h; + + p = SCCalloc(1, sizeof(*p) + default_packet_size); + if (unlikely(p == NULL)) + return NULL; + + PacketInit(p); + + struct timeval tval; + gettimeofday(&tval, NULL); + p->ts = SCTIME_FROM_TIMEVAL(&tval); + ip4h.ip_verhl = 4 << 4; + ip4h.ip_verhl |= hlen >> 2; + ip4h.ip_len = htons(hlen + content_len); + ip4h.ip_id = htons(id); + if (mf) + ip4h.ip_off = htons(IP_MF | off); + else + ip4h.ip_off = htons(off); + ip4h.ip_ttl = ttl; + ip4h.ip_proto = proto; + + ip4h.s_ip_src.s_addr = 0x01010101; /* 1.1.1.1 */ + ip4h.s_ip_dst.s_addr = 0x02020202; /* 2.2.2.2 */ + + /* copy content_len crap, we need full length */ + PacketCopyData(p, (uint8_t *)&ip4h, sizeof(ip4h)); + p->ip4h = (IPV4Hdr *)GET_PKT_DATA(p); + SET_IPV4_SRC_ADDR(p, &p->src); + SET_IPV4_DST_ADDR(p, &p->dst); + + PacketCopyDataOffset(p, hlen, content, content_len); + SET_PKT_LEN(p, hlen + content_len); + + p->ip4h->ip_csum = IPV4Checksum((uint16_t *)GET_PKT_DATA(p), hlen, 0); + + /* Self test. 
*/ + if (IPV4_GET_VER(p) != 4) + goto error; + if (IPV4_GET_HLEN(p) != hlen) + goto error; + if (IPV4_GET_IPLEN(p) != hlen + content_len) + goto error; + if (IPV4_GET_IPID(p) != id) + goto error; + if (IPV4_GET_IPOFFSET(p) != off) + goto error; + if (IPV4_GET_MF(p) != mf) + goto error; + if (IPV4_GET_IPTTL(p) != ttl) + goto error; + if (IPV4_GET_IPPROTO(p) != proto) + goto error; + + return p; +error: + if (p != NULL) + SCFree(p); + return NULL; +} + +static Packet *BuildIpv6TestPacket( + uint8_t proto, uint32_t id, uint16_t off, int mf, const uint8_t content, int content_len) { Packet *p = NULL; uint8_t *pcontent; @@ -1238,6 +1356,71 @@ static Packet *IPV6BuildTestPacket(uint8_t proto, uint32_t id, uint16_t off, return NULL; } +static Packet *BuildIpv6TestPacketWithContent( + uint8_t proto, uint32_t id, uint16_t off, int mf, const uint8_t *content, int content_len) +{ + Packet *p = NULL; + IPV6Hdr ip6h; + + p = SCCalloc(1, sizeof(*p) + default_packet_size); + if (unlikely(p == NULL)) + return NULL; + + PacketInit(p); + + struct timeval tval; + gettimeofday(&tval, NULL); + p->ts = SCTIME_FROM_TIMEVAL(&tval); + + ip6h.s_ip6_nxt = 44; + ip6h.s_ip6_hlim = 2; + + /* Source and dest address - very bogus addresses. */ + ip6h.s_ip6_src[0] = 0x01010101; + ip6h.s_ip6_src[1] = 0x01010101; + ip6h.s_ip6_src[2] = 0x01010101; + ip6h.s_ip6_src[3] = 0x01010101; + ip6h.s_ip6_dst[0] = 0x02020202; + ip6h.s_ip6_dst[1] = 0x02020202; + ip6h.s_ip6_dst[2] = 0x02020202; + ip6h.s_ip6_dst[3] = 0x02020202; + + /* copy content_len crap, we need full length */ + PacketCopyData(p, (uint8_t *)&ip6h, sizeof(IPV6Hdr)); + + p->ip6h = (IPV6Hdr *)GET_PKT_DATA(p); + IPV6_SET_RAW_VER(p->ip6h, 6); + /* Fragmentation header. 
*/ + IPV6FragHdr *fh = (IPV6FragHdr *)(GET_PKT_DATA(p) + sizeof(IPV6Hdr)); + fh->ip6fh_nxt = proto; + fh->ip6fh_ident = htonl(id); + fh->ip6fh_offlg = htons((off << 3) | mf); + + DecodeIPV6FragHeader(p, (uint8_t *)fh, 8, 8 + content_len, 0); + + PacketCopyDataOffset(p, sizeof(IPV6Hdr) + sizeof(IPV6FragHdr), content, content_len); + SET_PKT_LEN(p, sizeof(IPV6Hdr) + sizeof(IPV6FragHdr) + content_len); + + p->ip6h->s_ip6_plen = htons(sizeof(IPV6FragHdr) + content_len); + + SET_IPV6_SRC_ADDR(p, &p->src); + SET_IPV6_DST_ADDR(p, &p->dst); + + /* Self test. */ + if (IPV6_GET_VER(p) != 6) + goto error; + if (IPV6_GET_NH(p) != 44) + goto error; + if (IPV6_GET_PLEN(p) != sizeof(IPV6FragHdr) + content_len) + goto error; + + return p; +error: + if (p != NULL) + SCFree(p); + return NULL; +} + /** * Test the simplest possible re-assembly scenario. All packet in * order and no overlaps. @@ -1251,11 +1434,11 @@ static int DefragInOrderSimpleTest(void) DefragInit(); - p1 = BuildTestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 8); + p1 = BuildIpv4TestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 8); FAIL_IF_NULL(p1); - p2 = BuildTestPacket(IPPROTO_ICMP, id, 1, 1, 'B', 8); + p2 = BuildIpv4TestPacket(IPPROTO_ICMP, id, 1, 1, 'B', 8); FAIL_IF_NULL(p2); - p3 = BuildTestPacket(IPPROTO_ICMP, id, 2, 0, 'C', 3); + p3 = BuildIpv4TestPacket(IPPROTO_ICMP, id, 2, 0, 'C', 3); FAIL_IF_NULL(p3); FAIL_IF(Defrag(NULL, NULL, p1) != NULL); @@ -1303,11 +1486,11 @@ static int DefragReverseSimpleTest(void) DefragInit(); - p1 = BuildTestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 8); + p1 = BuildIpv4TestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 8); FAIL_IF_NULL(p1); - p2 = BuildTestPacket(IPPROTO_ICMP, id, 1, 1, 'B', 8); + p2 = BuildIpv4TestPacket(IPPROTO_ICMP, id, 1, 1, 'B', 8); FAIL_IF_NULL(p2); - p3 = BuildTestPacket(IPPROTO_ICMP, id, 2, 0, 'C', 3); + p3 = BuildIpv4TestPacket(IPPROTO_ICMP, id, 2, 0, 'C', 3); FAIL_IF_NULL(p3); FAIL_IF(Defrag(NULL, NULL, p3) != NULL); @@ -1347,7 +1530,7 @@ static int DefragReverseSimpleTest(void) * Test 
the simplest possible re-assembly scenario. All packet in * order and no overlaps. */ -static int IPV6DefragInOrderSimpleTest(void) +static int DefragInOrderSimpleIpv6Test(void) { Packet *p1 = NULL, *p2 = NULL, *p3 = NULL; Packet *reassembled = NULL; @@ -1356,11 +1539,11 @@ static int IPV6DefragInOrderSimpleTest(void) DefragInit(); - p1 = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 0, 1, 'A', 8); + p1 = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 0, 1, 'A', 8); FAIL_IF_NULL(p1); - p2 = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 1, 1, 'B', 8); + p2 = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 1, 1, 'B', 8); FAIL_IF_NULL(p2); - p3 = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 2, 0, 'C', 3); + p3 = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 2, 0, 'C', 3); FAIL_IF_NULL(p3); FAIL_IF(Defrag(NULL, NULL, p1) != NULL); @@ -1394,7 +1577,7 @@ static int IPV6DefragInOrderSimpleTest(void) PASS; } -static int IPV6DefragReverseSimpleTest(void) +static int DefragReverseSimpleIpv6Test(void) { DefragContext *dc = NULL; Packet *p1 = NULL, *p2 = NULL, *p3 = NULL; @@ -1407,11 +1590,11 @@ static int IPV6DefragReverseSimpleTest(void) dc = DefragContextNew(); FAIL_IF_NULL(dc); - p1 = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 0, 1, 'A', 8); + p1 = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 0, 1, 'A', 8); FAIL_IF_NULL(p1); - p2 = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 1, 1, 'B', 8); + p2 = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 1, 1, 'B', 8); FAIL_IF_NULL(p2); - p3 = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 2, 0, 'C', 3); + p3 = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 2, 0, 'C', 3); FAIL_IF_NULL(p3); FAIL_IF(Defrag(NULL, NULL, p3) != NULL); @@ -1444,8 +1627,7 @@ static int IPV6DefragReverseSimpleTest(void) PASS; } -static int DefragDoSturgesNovakTest(int policy, u_char *expected, - size_t expected_len) +static int DefragDoSturgesNovakTest(int policy, uint8_t *expected, size_t expected_len) { int i; @@ -1463,60 +1645,60 @@ static int DefragDoSturgesNovakTest(int policy, u_char *expected, * Original fragments. 
*/ - /* A*24 at 0. */ - packets[0] = BuildTestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 24); + /* <1> A*24 at 0. */ + packets[0] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 24); - /* B*15 at 32. */ - packets[1] = BuildTestPacket(IPPROTO_ICMP, id, 32 >> 3, 1, 'B', 16); + /* <2> B*16 at 32. */ + packets[1] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 32 >> 3, 1, 'B', 16); - /* C*24 at 48. */ - packets[2] = BuildTestPacket(IPPROTO_ICMP, id, 48 >> 3, 1, 'C', 24); + /* <3> C*24 at 48. */ + packets[2] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 48 >> 3, 1, 'C', 24); - /* D*8 at 80. */ - packets[3] = BuildTestPacket(IPPROTO_ICMP, id, 80 >> 3, 1, 'D', 8); + /* <3_1> D*8 at 80. */ + packets[3] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 80 >> 3, 1, 'D', 8); - /* E*16 at 104. */ - packets[4] = BuildTestPacket(IPPROTO_ICMP, id, 104 >> 3, 1, 'E', 16); + /* <3_2> E*16 at 104. */ + packets[4] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 104 >> 3, 1, 'E', 16); - /* F*24 at 120. */ - packets[5] = BuildTestPacket(IPPROTO_ICMP, id, 120 >> 3, 1, 'F', 24); + /* <3_3> F*24 at 120. */ + packets[5] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 120 >> 3, 1, 'F', 24); - /* G*16 at 144. */ - packets[6] = BuildTestPacket(IPPROTO_ICMP, id, 144 >> 3, 1, 'G', 16); + /* <3_4> G*16 at 144. */ + packets[6] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 144 >> 3, 1, 'G', 16); - /* H*16 at 160. */ - packets[7] = BuildTestPacket(IPPROTO_ICMP, id, 160 >> 3, 1, 'H', 16); + /* <3_5> H*16 at 160. */ + packets[7] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 160 >> 3, 1, 'H', 16); - /* I*8 at 176. */ - packets[8] = BuildTestPacket(IPPROTO_ICMP, id, 176 >> 3, 1, 'I', 8); + /* <3_6> I*8 at 176. */ + packets[8] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 176 >> 3, 1, 'I', 8); /* * Overlapping subsequent fragments. */ - /* J*32 at 8. */ - packets[9] = BuildTestPacket(IPPROTO_ICMP, id, 8 >> 3, 1, 'J', 32); + /* <4> J*32 at 8. */ + packets[9] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 8 >> 3, 1, 'J', 32); - /* K*24 at 48. 
*/ - packets[10] = BuildTestPacket(IPPROTO_ICMP, id, 48 >> 3, 1, 'K', 24); + /* <5> K*24 at 48. */ + packets[10] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 48 >> 3, 1, 'K', 24); - /* L*24 at 72. */ - packets[11] = BuildTestPacket(IPPROTO_ICMP, id, 72 >> 3, 1, 'L', 24); + /* <6> L*24 at 72. */ + packets[11] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 72 >> 3, 1, 'L', 24); - /* M*24 at 96. */ - packets[12] = BuildTestPacket(IPPROTO_ICMP, id, 96 >> 3, 1, 'M', 24); + /* <7> M*24 at 96. */ + packets[12] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 96 >> 3, 1, 'M', 24); - /* N*8 at 128. */ - packets[13] = BuildTestPacket(IPPROTO_ICMP, id, 128 >> 3, 1, 'N', 8); + /* <8> N*8 at 128. */ + packets[13] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 128 >> 3, 1, 'N', 8); - /* O*8 at 152. */ - packets[14] = BuildTestPacket(IPPROTO_ICMP, id, 152 >> 3, 1, 'O', 8); + /* <9> O*8 at 152. */ + packets[14] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 152 >> 3, 1, 'O', 8); - /* P*8 at 160. */ - packets[15] = BuildTestPacket(IPPROTO_ICMP, id, 160 >> 3, 1, 'P', 8); + /* <10> P*8 at 160. */ + packets[15] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 160 >> 3, 1, 'P', 8); - /* Q*16 at 176. */ - packets[16] = BuildTestPacket(IPPROTO_ICMP, id, 176 >> 3, 0, 'Q', 16); + /* <11> Q*16 at 176. 
*/ + packets[16] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 176 >> 3, 0, 'Q', 16); default_policy = policy; @@ -1542,8 +1724,15 @@ static int DefragDoSturgesNovakTest(int policy, u_char *expected, FAIL_IF(IPV4_GET_HLEN(reassembled) != 20); FAIL_IF(IPV4_GET_IPLEN(reassembled) != 20 + 192); - - FAIL_IF(memcmp(GET_PKT_DATA(reassembled) + 20, expected, expected_len) != 0); + FAIL_IF(expected_len != 192); + + if (memcmp(expected, GET_PKT_DATA(reassembled) + 20, expected_len) != 0) { + printf("Expected:\n"); + PrintRawDataFp(stdout, expected, expected_len); + printf("Got:\n"); + PrintRawDataFp(stdout, GET_PKT_DATA(reassembled) + 20, GET_PKT_LEN(reassembled) - 20); + FAIL; + } SCFree(reassembled); /* Make sure all frags were returned back to the pool. */ @@ -1556,8 +1745,7 @@ static int DefragDoSturgesNovakTest(int policy, u_char *expected, PASS; } -static int IPV6DefragDoSturgesNovakTest(int policy, u_char *expected, - size_t expected_len) +static int DefragDoSturgesNovakIpv6Test(int policy, uint8_t *expected, size_t expected_len) { int i; @@ -1575,60 +1763,60 @@ static int IPV6DefragDoSturgesNovakTest(int policy, u_char *expected, * Original fragments. */ - /* A*24 at 0. */ - packets[0] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 0, 1, 'A', 24); + /* <1> A*24 at 0. */ + packets[0] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 0, 1, 'A', 24); - /* B*15 at 32. */ - packets[1] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 32 >> 3, 1, 'B', 16); + /* <2> B*16 at 32. */ + packets[1] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 32 >> 3, 1, 'B', 16); - /* C*24 at 48. */ - packets[2] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 48 >> 3, 1, 'C', 24); + /* <3> C*24 at 48. */ + packets[2] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 48 >> 3, 1, 'C', 24); - /* D*8 at 80. */ - packets[3] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 80 >> 3, 1, 'D', 8); + /* <3_1> D*8 at 80. */ + packets[3] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 80 >> 3, 1, 'D', 8); - /* E*16 at 104. 
*/ - packets[4] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 104 >> 3, 1, 'E', 16); + /* <3_2> E*16 at 104. */ + packets[4] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 104 >> 3, 1, 'E', 16); - /* F*24 at 120. */ - packets[5] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 120 >> 3, 1, 'F', 24); + /* <3_3> F*24 at 120. */ + packets[5] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 120 >> 3, 1, 'F', 24); - /* G*16 at 144. */ - packets[6] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 144 >> 3, 1, 'G', 16); + /* <3_4> G*16 at 144. */ + packets[6] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 144 >> 3, 1, 'G', 16); - /* H*16 at 160. */ - packets[7] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 160 >> 3, 1, 'H', 16); + /* <3_5> H*16 at 160. */ + packets[7] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 160 >> 3, 1, 'H', 16); - /* I*8 at 176. */ - packets[8] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 176 >> 3, 1, 'I', 8); + /* <3_6> I*8 at 176. */ + packets[8] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 176 >> 3, 1, 'I', 8); /* * Overlapping subsequent fragments. */ - /* J*32 at 8. */ - packets[9] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 8 >> 3, 1, 'J', 32); + /* <4> J*32 at 8. */ + packets[9] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 8 >> 3, 1, 'J', 32); - /* K*24 at 48. */ - packets[10] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 48 >> 3, 1, 'K', 24); + /* <5> K*24 at 48. */ + packets[10] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 48 >> 3, 1, 'K', 24); - /* L*24 at 72. */ - packets[11] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 72 >> 3, 1, 'L', 24); + /* <6> L*24 at 72. */ + packets[11] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 72 >> 3, 1, 'L', 24); - /* M*24 at 96. */ - packets[12] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 96 >> 3, 1, 'M', 24); + /* <7> M*24 at 96. */ + packets[12] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 96 >> 3, 1, 'M', 24); - /* N*8 at 128. */ - packets[13] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 128 >> 3, 1, 'N', 8); + /* <8> N*8 at 128. 
*/ + packets[13] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 128 >> 3, 1, 'N', 8); - /* O*8 at 152. */ - packets[14] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 152 >> 3, 1, 'O', 8); + /* <9> O*8 at 152. */ + packets[14] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 152 >> 3, 1, 'O', 8); - /* P*8 at 160. */ - packets[15] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 160 >> 3, 1, 'P', 8); + /* <10> P*8 at 160. */ + packets[15] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 160 >> 3, 1, 'P', 8); - /* Q*16 at 176. */ - packets[16] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 176 >> 3, 0, 'Q', 16); + /* <11> Q*16 at 176. */ + packets[16] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 176 >> 3, 0, 'Q', 16); default_policy = policy; @@ -1667,35 +1855,61 @@ static int IPV6DefragDoSturgesNovakTest(int policy, u_char *expected, PASS; } +/* Define data that matches the naming "Target-Based Fragmentation + * Reassembly". + * + * For example, the data refers to a fragment of data as <1>, or <3_6> + * and uses these to diagram the input fragments and the resulting + * policies. We build test cases for the papers scenario but assign + * specific values to each segment. 
+ */ +#define D_1 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A' +#define D_2 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B' +#define D_3 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C' +#define D_3_1 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D' +#define D_3_2 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E' +#define D_3_3 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F' +#define D_3_4 'G', 'G', 'G', 'G', 'G', 'G', 'G', 'G' +#define D_3_5 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H' +#define D_3_6 'I', 'I', 'I', 'I', 'I', 'I', 'I', 'I' +#define D_4 'J', 'J', 'J', 'J', 'J', 'J', 'J', 'J' +#define D_5 'K', 'K', 'K', 'K', 'K', 'K', 'K', 'K' +#define D_6 'L', 'L', 'L', 'L', 'L', 'L', 'L', 'L' +#define D_7 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M' +#define D_8 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N' +#define D_9 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O' +#define D_10 'P', 'P', 'P', 'P', 'P', 'P', 'P', 'P' +#define D_11 'Q', 'Q', 'Q', 'Q', 'Q', 'Q', 'Q', 'Q' + static int DefragSturgesNovakBsdTest(void) { /* Expected data. */ - u_char expected[] = { - "AAAAAAAA" - "AAAAAAAA" - "AAAAAAAA" - "JJJJJJJJ" - "JJJJJJJJ" - "BBBBBBBB" - "CCCCCCCC" - "CCCCCCCC" - "CCCCCCCC" - "LLLLLLLL" - "LLLLLLLL" - "LLLLLLLL" - "MMMMMMMM" - "MMMMMMMM" - "MMMMMMMM" - "FFFFFFFF" - "FFFFFFFF" - "FFFFFFFF" - "GGGGGGGG" - "GGGGGGGG" - "HHHHHHHH" - "HHHHHHHH" - "IIIIIIII" - "QQQQQQQQ" + uint8_t expected[] = { + D_1, + D_1, + D_1, + D_4, + D_4, + D_2, + D_3, + D_3, + D_3, + D_6, + D_6, + D_6, + D_7, + D_7, + D_7, + D_3_3, + D_3_3, + D_3_3, + D_3_4, + D_3_4, + D_3_5, + D_3_5, + D_3_6, + D_11, }; FAIL_IF_NOT(DefragDoSturgesNovakTest(DEFRAG_POLICY_BSD, expected, @@ -1703,69 +1917,68 @@ DefragSturgesNovakBsdTest(void) PASS; } -static int IPV6DefragSturgesNovakBsdTest(void) +static int DefragSturgesNovakBsdIpv6Test(void) { /* Expected data. 
*/ - u_char expected[] = { - "AAAAAAAA" - "AAAAAAAA" - "AAAAAAAA" - "JJJJJJJJ" - "JJJJJJJJ" - "BBBBBBBB" - "CCCCCCCC" - "CCCCCCCC" - "CCCCCCCC" - "LLLLLLLL" - "LLLLLLLL" - "LLLLLLLL" - "MMMMMMMM" - "MMMMMMMM" - "MMMMMMMM" - "FFFFFFFF" - "FFFFFFFF" - "FFFFFFFF" - "GGGGGGGG" - "GGGGGGGG" - "HHHHHHHH" - "HHHHHHHH" - "IIIIIIII" - "QQQQQQQQ" + uint8_t expected[] = { + D_1, + D_1, + D_1, + D_4, + D_4, + D_2, + D_3, + D_3, + D_3, + D_6, + D_6, + D_6, + D_7, + D_7, + D_7, + D_3_3, + D_3_3, + D_3_3, + D_3_4, + D_3_4, + D_3_5, + D_3_5, + D_3_6, + D_11, }; - FAIL_IF_NOT(IPV6DefragDoSturgesNovakTest(DEFRAG_POLICY_BSD, expected, - sizeof(expected))); + FAIL_IF_NOT(DefragDoSturgesNovakIpv6Test(DEFRAG_POLICY_BSD, expected, sizeof(expected))); PASS; } static int DefragSturgesNovakLinuxIpv4Test(void) { /* Expected data. */ - u_char expected[] = { - "AAAAAAAA" - "AAAAAAAA" - "AAAAAAAA" - "JJJJJJJJ" - "JJJJJJJJ" - "BBBBBBBB" - "KKKKKKKK" - "KKKKKKKK" - "KKKKKKKK" - "LLLLLLLL" - "LLLLLLLL" - "LLLLLLLL" - "MMMMMMMM" - "MMMMMMMM" - "MMMMMMMM" - "FFFFFFFF" - "FFFFFFFF" - "FFFFFFFF" - "GGGGGGGG" - "GGGGGGGG" - "PPPPPPPP" - "HHHHHHHH" - "QQQQQQQQ" - "QQQQQQQQ" + uint8_t expected[] = { + D_1, + D_1, + D_1, + D_4, + D_4, + D_2, + D_5, + D_5, + D_5, + D_6, + D_6, + D_6, + D_7, + D_7, + D_7, + D_3_3, + D_3_3, + D_3_3, + D_3_4, + D_3_4, + D_10, + D_3_5, + D_11, + D_11, }; FAIL_IF_NOT(DefragDoSturgesNovakTest(DEFRAG_POLICY_LINUX, expected, @@ -1773,69 +1986,68 @@ static int DefragSturgesNovakLinuxIpv4Test(void) PASS; } -static int IPV6DefragSturgesNovakLinuxTest(void) +static int DefragSturgesNovakLinuxIpv6Test(void) { /* Expected data. 
*/ - u_char expected[] = { - "AAAAAAAA" - "AAAAAAAA" - "AAAAAAAA" - "JJJJJJJJ" - "JJJJJJJJ" - "BBBBBBBB" - "KKKKKKKK" - "KKKKKKKK" - "KKKKKKKK" - "LLLLLLLL" - "LLLLLLLL" - "LLLLLLLL" - "MMMMMMMM" - "MMMMMMMM" - "MMMMMMMM" - "FFFFFFFF" - "FFFFFFFF" - "FFFFFFFF" - "GGGGGGGG" - "GGGGGGGG" - "PPPPPPPP" - "HHHHHHHH" - "QQQQQQQQ" - "QQQQQQQQ" + uint8_t expected[] = { + D_1, + D_1, + D_1, + D_4, + D_4, + D_2, + D_5, + D_5, + D_5, + D_6, + D_6, + D_6, + D_7, + D_7, + D_7, + D_3_3, + D_3_3, + D_3_3, + D_3_4, + D_3_4, + D_10, + D_3_5, + D_11, + D_11, }; - FAIL_IF_NOT(IPV6DefragDoSturgesNovakTest(DEFRAG_POLICY_LINUX, expected, - sizeof(expected))); + FAIL_IF_NOT(DefragDoSturgesNovakIpv6Test(DEFRAG_POLICY_LINUX, expected, sizeof(expected))); PASS; } static int DefragSturgesNovakWindowsIpv4Test(void) { /* Expected data. */ - u_char expected[] = { - "AAAAAAAA" - "AAAAAAAA" - "AAAAAAAA" - "JJJJJJJJ" - "BBBBBBBB" - "BBBBBBBB" - "CCCCCCCC" - "CCCCCCCC" - "CCCCCCCC" - "LLLLLLLL" - "LLLLLLLL" - "LLLLLLLL" - "MMMMMMMM" - "EEEEEEEE" - "EEEEEEEE" - "FFFFFFFF" - "FFFFFFFF" - "FFFFFFFF" - "GGGGGGGG" - "GGGGGGGG" - "HHHHHHHH" - "HHHHHHHH" - "IIIIIIII" - "QQQQQQQQ" + uint8_t expected[] = { + D_1, + D_1, + D_1, + D_4, + D_2, + D_2, + D_3, + D_3, + D_3, + D_6, + D_6, + D_6, + D_7, + D_3_2, + D_3_2, + D_3_3, + D_3_3, + D_3_3, + D_3_4, + D_3_4, + D_3_5, + D_3_5, + D_3_6, + D_11, }; FAIL_IF_NOT(DefragDoSturgesNovakTest(DEFRAG_POLICY_WINDOWS, expected, @@ -1843,69 +2055,68 @@ static int DefragSturgesNovakWindowsIpv4Test(void) PASS; } -static int IPV6DefragSturgesNovakWindowsTest(void) +static int DefragSturgesNovakWindowsIpv6Test(void) { /* Expected data. 
*/ - u_char expected[] = { - "AAAAAAAA" - "AAAAAAAA" - "AAAAAAAA" - "JJJJJJJJ" - "BBBBBBBB" - "BBBBBBBB" - "CCCCCCCC" - "CCCCCCCC" - "CCCCCCCC" - "LLLLLLLL" - "LLLLLLLL" - "LLLLLLLL" - "MMMMMMMM" - "EEEEEEEE" - "EEEEEEEE" - "FFFFFFFF" - "FFFFFFFF" - "FFFFFFFF" - "GGGGGGGG" - "GGGGGGGG" - "HHHHHHHH" - "HHHHHHHH" - "IIIIIIII" - "QQQQQQQQ" + uint8_t expected[] = { + D_1, + D_1, + D_1, + D_4, + D_2, + D_2, + D_3, + D_3, + D_3, + D_6, + D_6, + D_6, + D_7, + D_3_2, + D_3_2, + D_3_3, + D_3_3, + D_3_3, + D_3_4, + D_3_4, + D_3_5, + D_3_5, + D_3_6, + D_11, }; - FAIL_IF_NOT(IPV6DefragDoSturgesNovakTest(DEFRAG_POLICY_WINDOWS, expected, - sizeof(expected))); + FAIL_IF_NOT(DefragDoSturgesNovakIpv6Test(DEFRAG_POLICY_WINDOWS, expected, sizeof(expected))); PASS; } static int DefragSturgesNovakSolarisTest(void) { /* Expected data. */ - u_char expected[] = { - "AAAAAAAA" - "AAAAAAAA" - "AAAAAAAA" - "JJJJJJJJ" - "BBBBBBBB" - "BBBBBBBB" - "CCCCCCCC" - "CCCCCCCC" - "CCCCCCCC" - "LLLLLLLL" - "LLLLLLLL" - "LLLLLLLL" - "MMMMMMMM" - "MMMMMMMM" - "MMMMMMMM" - "FFFFFFFF" - "FFFFFFFF" - "FFFFFFFF" - "GGGGGGGG" - "GGGGGGGG" - "HHHHHHHH" - "HHHHHHHH" - "IIIIIIII" - "QQQQQQQQ" + uint8_t expected[] = { + D_1, + D_1, + D_1, + D_4, + D_2, + D_2, + D_3, + D_3, + D_3, + D_6, + D_6, + D_6, + D_7, + D_7, + D_7, + D_3_3, + D_3_3, + D_3_3, + D_3_4, + D_3_4, + D_3_5, + D_3_5, + D_3_6, + D_11, }; FAIL_IF_NOT(DefragDoSturgesNovakTest(DEFRAG_POLICY_SOLARIS, expected, @@ -1913,69 +2124,68 @@ static int DefragSturgesNovakSolarisTest(void) PASS; } -static int IPV6DefragSturgesNovakSolarisTest(void) +static int DefragSturgesNovakSolarisIpv6Test(void) { /* Expected data. 
*/ - u_char expected[] = { - "AAAAAAAA" - "AAAAAAAA" - "AAAAAAAA" - "JJJJJJJJ" - "BBBBBBBB" - "BBBBBBBB" - "CCCCCCCC" - "CCCCCCCC" - "CCCCCCCC" - "LLLLLLLL" - "LLLLLLLL" - "LLLLLLLL" - "MMMMMMMM" - "MMMMMMMM" - "MMMMMMMM" - "FFFFFFFF" - "FFFFFFFF" - "FFFFFFFF" - "GGGGGGGG" - "GGGGGGGG" - "HHHHHHHH" - "HHHHHHHH" - "IIIIIIII" - "QQQQQQQQ" + uint8_t expected[] = { + D_1, + D_1, + D_1, + D_4, + D_2, + D_2, + D_3, + D_3, + D_3, + D_6, + D_6, + D_6, + D_7, + D_7, + D_7, + D_3_3, + D_3_3, + D_3_3, + D_3_4, + D_3_4, + D_3_5, + D_3_5, + D_3_6, + D_11, }; - FAIL_IF_NOT(IPV6DefragDoSturgesNovakTest(DEFRAG_POLICY_SOLARIS, expected, - sizeof(expected))); + FAIL_IF_NOT(DefragDoSturgesNovakIpv6Test(DEFRAG_POLICY_SOLARIS, expected, sizeof(expected))); PASS; } static int DefragSturgesNovakFirstTest(void) { /* Expected data. */ - u_char expected[] = { - "AAAAAAAA" - "AAAAAAAA" - "AAAAAAAA" - "JJJJJJJJ" - "BBBBBBBB" - "BBBBBBBB" - "CCCCCCCC" - "CCCCCCCC" - "CCCCCCCC" - "LLLLLLLL" - "DDDDDDDD" - "LLLLLLLL" - "MMMMMMMM" - "EEEEEEEE" - "EEEEEEEE" - "FFFFFFFF" - "FFFFFFFF" - "FFFFFFFF" - "GGGGGGGG" - "GGGGGGGG" - "HHHHHHHH" - "HHHHHHHH" - "IIIIIIII" - "QQQQQQQQ" + uint8_t expected[] = { + D_1, + D_1, + D_1, + D_4, + D_2, + D_2, + D_3, + D_3, + D_3, + D_6, + D_3_1, + D_6, + D_7, + D_3_2, + D_3_2, + D_3_3, + D_3_3, + D_3_3, + D_3_4, + D_3_4, + D_3_5, + D_3_5, + D_3_6, + D_11, }; FAIL_IF_NOT(DefragDoSturgesNovakTest(DEFRAG_POLICY_FIRST, expected, @@ -1983,69 +2193,68 @@ static int DefragSturgesNovakFirstTest(void) PASS; } -static int IPV6DefragSturgesNovakFirstTest(void) +static int DefragSturgesNovakFirstIpv6Test(void) { /* Expected data. 
*/ - u_char expected[] = { - "AAAAAAAA" - "AAAAAAAA" - "AAAAAAAA" - "JJJJJJJJ" - "BBBBBBBB" - "BBBBBBBB" - "CCCCCCCC" - "CCCCCCCC" - "CCCCCCCC" - "LLLLLLLL" - "DDDDDDDD" - "LLLLLLLL" - "MMMMMMMM" - "EEEEEEEE" - "EEEEEEEE" - "FFFFFFFF" - "FFFFFFFF" - "FFFFFFFF" - "GGGGGGGG" - "GGGGGGGG" - "HHHHHHHH" - "HHHHHHHH" - "IIIIIIII" - "QQQQQQQQ" + uint8_t expected[] = { + D_1, + D_1, + D_1, + D_4, + D_2, + D_2, + D_3, + D_3, + D_3, + D_6, + D_3_1, + D_6, + D_7, + D_3_2, + D_3_2, + D_3_3, + D_3_3, + D_3_3, + D_3_4, + D_3_4, + D_3_5, + D_3_5, + D_3_6, + D_11, }; - return IPV6DefragDoSturgesNovakTest(DEFRAG_POLICY_FIRST, expected, - sizeof(expected)); + return DefragDoSturgesNovakIpv6Test(DEFRAG_POLICY_FIRST, expected, sizeof(expected)); } static int DefragSturgesNovakLastTest(void) { /* Expected data. */ - u_char expected[] = { - "AAAAAAAA" - "JJJJJJJJ" - "JJJJJJJJ" - "JJJJJJJJ" - "JJJJJJJJ" - "BBBBBBBB" - "KKKKKKKK" - "KKKKKKKK" - "KKKKKKKK" - "LLLLLLLL" - "LLLLLLLL" - "LLLLLLLL" - "MMMMMMMM" - "MMMMMMMM" - "MMMMMMMM" - "FFFFFFFF" - "NNNNNNNN" - "FFFFFFFF" - "GGGGGGGG" - "OOOOOOOO" - "PPPPPPPP" - "HHHHHHHH" - "QQQQQQQQ" - "QQQQQQQQ" + uint8_t expected[] = { + D_1, + D_4, + D_4, + D_4, + D_4, + D_2, + D_5, + D_5, + D_5, + D_6, + D_6, + D_6, + D_7, + D_7, + D_7, + D_3_3, + D_8, + D_3_3, + D_3_4, + D_9, + D_10, + D_3_5, + D_11, + D_11, }; FAIL_IF_NOT(DefragDoSturgesNovakTest(DEFRAG_POLICY_LAST, expected, @@ -2053,38 +2262,37 @@ DefragSturgesNovakLastTest(void) PASS; } -static int IPV6DefragSturgesNovakLastTest(void) +static int DefragSturgesNovakLastIpv6Test(void) { /* Expected data. 
*/ - u_char expected[] = { - "AAAAAAAA" - "JJJJJJJJ" - "JJJJJJJJ" - "JJJJJJJJ" - "JJJJJJJJ" - "BBBBBBBB" - "KKKKKKKK" - "KKKKKKKK" - "KKKKKKKK" - "LLLLLLLL" - "LLLLLLLL" - "LLLLLLLL" - "MMMMMMMM" - "MMMMMMMM" - "MMMMMMMM" - "FFFFFFFF" - "NNNNNNNN" - "FFFFFFFF" - "GGGGGGGG" - "OOOOOOOO" - "PPPPPPPP" - "HHHHHHHH" - "QQQQQQQQ" - "QQQQQQQQ" + uint8_t expected[] = { + D_1, + D_4, + D_4, + D_4, + D_4, + D_2, + D_5, + D_5, + D_5, + D_6, + D_6, + D_6, + D_7, + D_7, + D_7, + D_3_3, + D_8, + D_3_3, + D_3_4, + D_9, + D_10, + D_3_5, + D_11, + D_11, }; - FAIL_IF_NOT(IPV6DefragDoSturgesNovakTest(DEFRAG_POLICY_LAST, expected, - sizeof(expected))); + FAIL_IF_NOT(DefragDoSturgesNovakIpv6Test(DEFRAG_POLICY_LAST, expected, sizeof(expected))); PASS; } @@ -2099,7 +2307,7 @@ static int DefragTimeoutTest(void) /* Load in 16 packets. */ for (i = 0; i < 16; i++) { - Packet *p = BuildTestPacket(IPPROTO_ICMP,i, 0, 1, 'A' + i, 16); + Packet *p = BuildIpv4TestPacket(IPPROTO_ICMP, i, 0, 1, 'A' + i, 16); FAIL_IF_NULL(p); Packet *tp = Defrag(NULL, NULL, p); @@ -2109,7 +2317,7 @@ static int DefragTimeoutTest(void) /* Build a new packet but push the timestamp out by our timeout. * This should force our previous fragments to be timed out. */ - Packet *p = BuildTestPacket(IPPROTO_ICMP, 99, 0, 1, 'A' + i, 16); + Packet *p = BuildIpv4TestPacket(IPPROTO_ICMP, 99, 0, 1, 'A' + i, 16); FAIL_IF_NULL(p); p->ts = SCTIME_ADD_SECS(p->ts, defrag_context->timeout + 1); @@ -2134,7 +2342,7 @@ static int DefragTimeoutTest(void) * fail. The fix was simple, but this unit test is just to make sure * its not introduced. */ -static int DefragIPv4NoDataTest(void) +static int DefragNoDataIpv4Test(void) { DefragContext *dc = NULL; Packet *p = NULL; @@ -2146,7 +2354,7 @@ static int DefragIPv4NoDataTest(void) FAIL_IF_NULL(dc); /* This packet has an offset > 0, more frags set to 0 and no data. 
*/ - p = BuildTestPacket(IPPROTO_ICMP, id, 1, 0, 'A', 0); + p = BuildIpv4TestPacket(IPPROTO_ICMP, id, 1, 0, 'A', 0); FAIL_IF_NULL(p); /* We do not expect a packet returned. */ @@ -2163,7 +2371,7 @@ static int DefragIPv4NoDataTest(void) PASS; } -static int DefragIPv4TooLargeTest(void) +static int DefragTooLargeIpv4Test(void) { DefragContext *dc = NULL; Packet *p = NULL; @@ -2175,7 +2383,7 @@ static int DefragIPv4TooLargeTest(void) /* Create a fragment that would extend past the max allowable size * for an IPv4 packet. */ - p = BuildTestPacket(IPPROTO_ICMP, 1, 8183, 0, 'A', 71); + p = BuildIpv4TestPacket(IPPROTO_ICMP, 1, 8183, 0, 'A', 71); FAIL_IF_NULL(p); /* We do not expect a packet returned. */ @@ -2206,9 +2414,9 @@ static int DefragVlanTest(void) DefragInit(); - p1 = BuildTestPacket(IPPROTO_ICMP, 1, 0, 1, 'A', 8); + p1 = BuildIpv4TestPacket(IPPROTO_ICMP, 1, 0, 1, 'A', 8); FAIL_IF_NULL(p1); - p2 = BuildTestPacket(IPPROTO_ICMP, 1, 1, 0, 'B', 8); + p2 = BuildIpv4TestPacket(IPPROTO_ICMP, 1, 1, 0, 'B', 8); FAIL_IF_NULL(p2); /* With no VLAN IDs set, packets should re-assemble. */ @@ -2238,9 +2446,9 @@ static int DefragVlanQinQTest(void) DefragInit(); - p1 = BuildTestPacket(IPPROTO_ICMP, 1, 0, 1, 'A', 8); + p1 = BuildIpv4TestPacket(IPPROTO_ICMP, 1, 0, 1, 'A', 8); FAIL_IF_NULL(p1); - p2 = BuildTestPacket(IPPROTO_ICMP, 1, 1, 0, 'B', 8); + p2 = BuildIpv4TestPacket(IPPROTO_ICMP, 1, 1, 0, 'B', 8); FAIL_IF_NULL(p2); /* With no VLAN IDs set, packets should re-assemble. */ @@ -2272,9 +2480,9 @@ static int DefragVlanQinQinQTest(void) DefragInit(); - Packet *p1 = BuildTestPacket(IPPROTO_ICMP, 1, 0, 1, 'A', 8); + Packet *p1 = BuildIpv4TestPacket(IPPROTO_ICMP, 1, 0, 1, 'A', 8); FAIL_IF_NULL(p1); - Packet *p2 = BuildTestPacket(IPPROTO_ICMP, 1, 1, 0, 'B', 8); + Packet *p2 = BuildIpv4TestPacket(IPPROTO_ICMP, 1, 1, 0, 'B', 8); FAIL_IF_NULL(p2); /* With no VLAN IDs set, packets should re-assemble. 
*/ @@ -2308,7 +2516,7 @@ static int DefragTrackerReuseTest(void) /* Build a packet, its not a fragment but shouldn't matter for * this test. */ - p1 = BuildTestPacket(IPPROTO_ICMP, id, 0, 0, 'A', 8); + p1 = BuildIpv4TestPacket(IPPROTO_ICMP, id, 0, 0, 'A', 8); FAIL_IF_NULL(p1); /* Get a tracker. It shouldn't look like its already in use. */ @@ -2355,9 +2563,9 @@ static int DefragMfIpv4Test(void) DefragInit(); - Packet *p1 = BuildTestPacket(IPPROTO_ICMP, ip_id, 2, 1, 'C', 8); - Packet *p2 = BuildTestPacket(IPPROTO_ICMP, ip_id, 0, 1, 'A', 8); - Packet *p3 = BuildTestPacket(IPPROTO_ICMP, ip_id, 1, 0, 'B', 8); + Packet *p1 = BuildIpv4TestPacket(IPPROTO_ICMP, ip_id, 2, 1, 'C', 8); + Packet *p2 = BuildIpv4TestPacket(IPPROTO_ICMP, ip_id, 0, 1, 'A', 8); + Packet *p3 = BuildIpv4TestPacket(IPPROTO_ICMP, ip_id, 1, 0, 'B', 8); FAIL_IF(p1 == NULL || p2 == NULL || p3 == NULL); p = Defrag(NULL, NULL, p1); @@ -2374,6 +2582,10 @@ static int DefragMfIpv4Test(void) * fragments should be in the re-assembled packet. */ FAIL_IF(IPV4_GET_IPLEN(p) != 36); + /* Verify the payload of the IPv4 packet. */ + uint8_t expected_payload[] = "AAAAAAAABBBBBBBB"; + FAIL_IF(memcmp(GET_PKT_DATA(p) + sizeof(IPV4Hdr), expected_payload, sizeof(expected_payload))); + SCFree(p1); SCFree(p2); SCFree(p3); @@ -2398,9 +2610,9 @@ static int DefragMfIpv6Test(void) DefragInit(); - Packet *p1 = IPV6BuildTestPacket(IPPROTO_ICMPV6, ip_id, 2, 1, 'C', 8); - Packet *p2 = IPV6BuildTestPacket(IPPROTO_ICMPV6, ip_id, 0, 1, 'A', 8); - Packet *p3 = IPV6BuildTestPacket(IPPROTO_ICMPV6, ip_id, 1, 0, 'B', 8); + Packet *p1 = BuildIpv6TestPacket(IPPROTO_ICMPV6, ip_id, 2, 1, 'C', 8); + Packet *p2 = BuildIpv6TestPacket(IPPROTO_ICMPV6, ip_id, 0, 1, 'A', 8); + Packet *p3 = BuildIpv6TestPacket(IPPROTO_ICMPV6, ip_id, 1, 0, 'B', 8); FAIL_IF(p1 == NULL || p2 == NULL || p3 == NULL); p = Defrag(NULL, NULL, p1); @@ -2417,6 +2629,10 @@ static int DefragMfIpv6Test(void) * of 2 fragments, so 16. 
*/ FAIL_IF(IPV6_GET_PLEN(p) != 16); + /* Verify the payload of the IPv4 packet. */ + uint8_t expected_payload[] = "AAAAAAAABBBBBBBB"; + FAIL_IF(memcmp(GET_PKT_DATA(p) + sizeof(IPV6Hdr), expected_payload, sizeof(expected_payload))); + SCFree(p1); SCFree(p2); SCFree(p3); @@ -2436,11 +2652,11 @@ static int DefragTestBadProto(void) DefragInit(); - p1 = BuildTestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 8); + p1 = BuildIpv4TestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 8); FAIL_IF_NULL(p1); - p2 = BuildTestPacket(IPPROTO_UDP, id, 1, 1, 'B', 8); + p2 = BuildIpv4TestPacket(IPPROTO_UDP, id, 1, 1, 'B', 8); FAIL_IF_NULL(p2); - p3 = BuildTestPacket(IPPROTO_ICMP, id, 2, 0, 'C', 3); + p3 = BuildIpv4TestPacket(IPPROTO_ICMP, id, 2, 0, 'C', 3); FAIL_IF_NULL(p3); FAIL_IF_NOT_NULL(Defrag(NULL, NULL, p1)); @@ -2461,19 +2677,19 @@ static int DefragTestBadProto(void) */ static int DefragTestJeremyLinux(void) { - char expected[] = "AAAAAAAA" - "AAAAAAAA" - "AAAAAAAA" - "CCCCCCCC" - "CCCCCCCC" - "CCCCCCCC" - "CCCCCCCC" - "CCCCCCCC" - "CCCCCCCC" - "BBBBBBBB" - "BBBBBBBB" - "DDDDDDDD" - "DDDDDD"; + uint8_t expected[] = "AAAAAAAA" + "AAAAAAAA" + "AAAAAAAA" + "CCCCCCCC" + "CCCCCCCC" + "CCCCCCCC" + "CCCCCCCC" + "CCCCCCCC" + "CCCCCCCC" + "BBBBBBBB" + "BBBBBBBB" + "DDDDDDDD" + "DDDDDD"; DefragInit(); default_policy = DEFRAG_POLICY_LINUX; @@ -2482,10 +2698,10 @@ static int DefragTestJeremyLinux(void) Packet *packets[4]; int i = 0; - packets[0] = BuildTestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 24); - packets[1] = BuildTestPacket(IPPROTO_ICMP, id, 40 >> 3, 1, 'B', 48); - packets[2] = BuildTestPacket(IPPROTO_ICMP, id, 24 >> 3, 1, 'C', 48); - packets[3] = BuildTestPacket(IPPROTO_ICMP, id, 88 >> 3, 0, 'D', 14); + packets[0] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 24); + packets[1] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 40 >> 3, 1, 'B', 48); + packets[2] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 24 >> 3, 1, 'C', 48); + packets[3] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 88 >> 3, 0, 'D', 14); Packet *r = 
Defrag(NULL, NULL, packets[0]); FAIL_IF_NOT_NULL(r); @@ -2510,6 +2726,401 @@ static int DefragTestJeremyLinux(void) PASS; } +/** + * | 0 | 8 | 16 | 24 | 32 | + * |----------|----------|----------|----------|----------| + * | AAAAAAAA | AAAAAAAA | + * | | BBBBBBBB | BBBBBBBB | | | + * | | | CCCCCCCC | CCCCCCCC | | + * | DDDDDDDD | | | | | + * + * | DDDDDDDD | BBBBBBBB | BBBBBBBB | CCCCCCCC | AAAAAAAA | + */ +static int DefragBsdFragmentAfterNoMfIpv4Test(void) +{ + DefragInit(); + default_policy = DEFRAG_POLICY_BSD; + Packet *packets[4]; + + packets[0] = BuildIpv4TestPacket(IPPROTO_ICMP, 0x96, 24 >> 3, 0, 'A', 16); + packets[1] = BuildIpv4TestPacket(IPPROTO_ICMP, 0x96, 8 >> 3, 1, 'B', 16); + packets[2] = BuildIpv4TestPacket(IPPROTO_ICMP, 0x96, 16 >> 3, 1, 'C', 16); + packets[3] = BuildIpv4TestPacket(IPPROTO_ICMP, 0x96, 0, 1, 'D', 8); + + Packet *r = Defrag(NULL, NULL, packets[0]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[1]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[2]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[3]); + FAIL_IF_NULL(r); + + // clang-format off + uint8_t expected[] = { + 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', + 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', + 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', + 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', + 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', + }; + // clang-format on + + if (memcmp(expected, GET_PKT_DATA(r) + 20, sizeof(expected)) != 0) { + printf("Expected:\n"); + PrintRawDataFp(stdout, expected, sizeof(expected)); + printf("Got:\n"); + PrintRawDataFp(stdout, GET_PKT_DATA(r) + 20, GET_PKT_LEN(r) - 20); + FAIL; + } + + DefragDestroy(); + PASS; +} + +static int DefragBsdFragmentAfterNoMfIpv6Test(void) +{ + DefragInit(); + default_policy = DEFRAG_POLICY_BSD; + Packet *packets[4]; + + packets[0] = BuildIpv6TestPacket(IPPROTO_ICMP, 0x96, 24 >> 3, 0, 'A', 16); + packets[1] = BuildIpv6TestPacket(IPPROTO_ICMP, 0x96, 8 >> 3, 1, 'B', 16); + packets[2] = 
BuildIpv6TestPacket(IPPROTO_ICMP, 0x96, 16 >> 3, 1, 'C', 16); + packets[3] = BuildIpv6TestPacket(IPPROTO_ICMP, 0x96, 0, 1, 'D', 8); + + Packet *r = Defrag(NULL, NULL, packets[0]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[1]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[2]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[3]); + FAIL_IF_NULL(r); + + // clang-format off + uint8_t expected[] = { + 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', + 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', + 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', + 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', + 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', + }; + // clang-format on + + if (memcmp(expected, GET_PKT_DATA(r) + 40, sizeof(expected)) != 0) { + printf("Expected:\n"); + PrintRawDataFp(stdout, expected, sizeof(expected)); + printf("Got:\n"); + PrintRawDataFp(stdout, GET_PKT_DATA(r) + 40, GET_PKT_LEN(r) - 40); + FAIL; + } + + DefragDestroy(); + PASS; +} + +static int DefragBsdSubsequentOverlapsStartOfOriginalIpv4Test_2(void) +{ + DefragInit(); + default_policy = DEFRAG_POLICY_BSD; + Packet *packets[4]; + + /* Packet 1: off=16, mf=1 */ + packets[0] = BuildIpv4TestPacketWithContent( + IPPROTO_ICMP, 6, 16 >> 3, 1, (uint8_t *)"AABBCCDDAABBDDCC", 16); + + /* Packet 2: off=8, mf=1 */ + packets[1] = BuildIpv4TestPacketWithContent( + IPPROTO_ICMP, 6, 8 >> 3, 1, (uint8_t *)"AACCBBDDAACCDDBB", 16); + + /* Packet 3: off=0, mf=1: IP and ICMP header. 
*/ + packets[2] = BuildIpv4TestPacketWithContent(IPPROTO_ICMP, 6, 0, 1, (uint8_t *)"ZZZZZZZZ", 8); + + /* Packet 4: off=8, mf=1 */ + packets[3] = + BuildIpv4TestPacketWithContent(IPPROTO_ICMP, 6, 32 >> 3, 0, (uint8_t *)"DDCCBBAA", 8); + + Packet *r = Defrag(NULL, NULL, packets[0]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[1]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[2]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[3]); + FAIL_IF_NULL(r); + + // clang-format off + const uint8_t expected[] = { + // AACCBBDD + // AACCDDBB + // AABBDDCC + // DDCCBBAA + 'A', 'A', 'C', 'C', 'B', 'B', 'D', 'D', + 'A', 'A', 'C', 'C', 'D', 'D', 'B', 'B', + 'A', 'A', 'B', 'B', 'D', 'D', 'C', 'C', + 'D', 'D', 'C', 'C', 'B', 'B', 'A', 'A', + }; + // clang-format on + + FAIL_IF(memcmp(expected, GET_PKT_DATA(r) + 20 + 8, sizeof(expected)) != 0); + + DefragDestroy(); + PASS; +} + +static int DefragBsdSubsequentOverlapsStartOfOriginalIpv6Test_2(void) +{ + DefragInit(); + default_policy = DEFRAG_POLICY_BSD; + Packet *packets[4]; + + /* Packet 1: off=16, mf=1 */ + packets[0] = BuildIpv6TestPacketWithContent( + IPPROTO_ICMP, 6, 16 >> 3, 1, (uint8_t *)"AABBCCDDAABBDDCC", 16); + + /* Packet 2: off=8, mf=1 */ + packets[1] = BuildIpv6TestPacketWithContent( + IPPROTO_ICMP, 6, 8 >> 3, 1, (uint8_t *)"AACCBBDDAACCDDBB", 16); + + /* Packet 3: off=0, mf=1: IP and ICMP header. 
*/ + packets[2] = BuildIpv6TestPacketWithContent(IPPROTO_ICMP, 6, 0, 1, (uint8_t *)"ZZZZZZZZ", 8); + + /* Packet 4: off=8, mf=1 */ + packets[3] = + BuildIpv6TestPacketWithContent(IPPROTO_ICMP, 6, 32 >> 3, 0, (uint8_t *)"DDCCBBAA", 8); + + Packet *r = Defrag(NULL, NULL, packets[0]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[1]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[2]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[3]); + FAIL_IF_NULL(r); + + // clang-format off + const uint8_t expected[] = { + // AACCBBDD + // AACCDDBB + // AABBDDCC + // DDCCBBAA + 'A', 'A', 'C', 'C', 'B', 'B', 'D', 'D', + 'A', 'A', 'C', 'C', 'D', 'D', 'B', 'B', + 'A', 'A', 'B', 'B', 'D', 'D', 'C', 'C', + 'D', 'D', 'C', 'C', 'B', 'B', 'A', 'A', + }; + // clang-format on + + FAIL_IF(memcmp(expected, GET_PKT_DATA(r) + 40 + 8, sizeof(expected)) != 0); + + DefragDestroy(); + PASS; +} + +/** + * #### Input + * + * | 96 (0) | 104 (8) | 112 (16) | 120 (24) | + * |----------|----------|----------|----------| + * | | EEEEEEEE | EEEEEEEE | EEEEEEEE | + * | MMMMMMMM | MMMMMMMM | MMMMMMMM | | + * + * #### Expected Output + * + * | MMMMMMMM | MMMMMMMM | MMMMMMMM | EEEEEEEE | + */ +static int DefragBsdSubsequentOverlapsStartOfOriginalIpv4Test(void) +{ + DefragInit(); + default_policy = DEFRAG_POLICY_BSD; + Packet *packets[2]; + + packets[0] = BuildIpv4TestPacket(IPPROTO_ICMP, 1, 8 >> 3, 0, 'E', 24); + packets[1] = BuildIpv4TestPacket(IPPROTO_ICMP, 1, 0, 1, 'M', 24); + + Packet *r = Defrag(NULL, NULL, packets[0]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[1]); + FAIL_IF_NULL(r); + + // clang-format off + const uint8_t expected[] = { + 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', + 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', + 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', + 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', + }; + // clang-format on + + if (memcmp(expected, GET_PKT_DATA(r) + 20, sizeof(expected)) != 0) { + printf("Expected:\n"); + PrintRawDataFp(stdout, 
expected, sizeof(expected)); + printf("Got:\n"); + PrintRawDataFp(stdout, GET_PKT_DATA(r) + 20, GET_PKT_LEN(r) - 20); + FAIL; + } + + PASS; +} + +static int DefragBsdSubsequentOverlapsStartOfOriginalIpv6Test(void) +{ + DefragInit(); + default_policy = DEFRAG_POLICY_BSD; + Packet *packets[2]; + + packets[0] = BuildIpv6TestPacket(IPPROTO_ICMP, 1, 8 >> 3, 0, 'E', 24); + packets[1] = BuildIpv6TestPacket(IPPROTO_ICMP, 1, 0, 1, 'M', 24); + + Packet *r = Defrag(NULL, NULL, packets[0]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[1]); + FAIL_IF_NULL(r); + + // clang-format off + const uint8_t expected[] = { + 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', + 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', + 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', + 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E', + }; + // clang-format on + + if (memcmp(expected, GET_PKT_DATA(r) + 40, sizeof(expected)) != 0) { + printf("Expected:\n"); + PrintRawDataFp(stdout, expected, sizeof(expected)); + printf("Got:\n"); + PrintRawDataFp(stdout, GET_PKT_DATA(r) + 40, GET_PKT_LEN(r) - 40); + FAIL; + } + + PASS; +} + +/** + * Reassembly should fail. + * + * |0 |8 |16 |24 |32 |40 |48 | + * |========|========|========|========|========|========|========| + * | | |AABBCCDD|AABBDDCC| | | | + * | | | | | |AACCBBDD| | + * | |AACCDDBB|AADDBBCC| | | | | + * |ZZZZZZZZ| | | | | | | + * | | | | | | |DDCCBBAA| + */ +static int DefragBsdMissingFragmentIpv4Test(void) +{ + DefragInit(); + default_policy = DEFRAG_POLICY_BSD; + Packet *packets[5]; + + packets[0] = BuildIpv4TestPacketWithContent( + IPPROTO_ICMP, 189, 16 >> 3, 1, (uint8_t *)"AABBCCDDAABBDDCC", 16); + + packets[1] = + BuildIpv4TestPacketWithContent(IPPROTO_ICMP, 189, 40 >> 3, 1, (uint8_t *)"AACCBBDD", 8); + + packets[2] = BuildIpv4TestPacketWithContent( + IPPROTO_ICMP, 189, 8 >> 3, 1, (uint8_t *)"AACCDDBBAADDBBCC", 16); + + /* ICMP header. 
*/ + packets[3] = BuildIpv4TestPacketWithContent(IPPROTO_ICMP, 189, 0, 1, (uint8_t *)"ZZZZZZZZ", 8); + + packets[4] = + BuildIpv4TestPacketWithContent(IPPROTO_ICMP, 189, 48 >> 3, 0, (uint8_t *)"DDCCBBAA", 8); + + Packet *r = Defrag(NULL, NULL, packets[0]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[1]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[2]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[3]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[4]); + FAIL_IF_NOT_NULL(r); + +#if 0 + PrintRawDataFp(stdout, GET_PKT_DATA(r) + 20, GET_PKT_LEN(r) - 20); +#endif + + for (int i = 0; i < 5; i++) { + SCFree(packets[i]); + } + + DefragDestroy(); + + PASS; +} + +static int DefragBsdMissingFragmentIpv6Test(void) +{ + DefragInit(); + default_policy = DEFRAG_POLICY_BSD; + Packet *packets[5]; + + packets[0] = BuildIpv6TestPacketWithContent( + IPPROTO_ICMP, 189, 16 >> 3, 1, (uint8_t *)"AABBCCDDAABBDDCC", 16); + + packets[1] = + BuildIpv6TestPacketWithContent(IPPROTO_ICMP, 189, 40 >> 3, 1, (uint8_t *)"AACCBBDD", 8); + + packets[2] = BuildIpv6TestPacketWithContent( + IPPROTO_ICMP, 189, 8 >> 3, 1, (uint8_t *)"AACCDDBBAADDBBCC", 16); + + /* ICMP header. 
*/ + packets[3] = BuildIpv6TestPacketWithContent(IPPROTO_ICMP, 189, 0, 1, (uint8_t *)"ZZZZZZZZ", 8); + + packets[4] = + BuildIpv6TestPacketWithContent(IPPROTO_ICMP, 189, 48 >> 3, 0, (uint8_t *)"DDCCBBAA", 8); + + Packet *r = Defrag(NULL, NULL, packets[0]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[1]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[2]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[3]); + FAIL_IF_NOT_NULL(r); + + r = Defrag(NULL, NULL, packets[4]); + FAIL_IF_NOT_NULL(r); + +#if 0 + PrintRawDataFp(stdout, GET_PKT_DATA(r) + 40, GET_PKT_LEN(r) - 40); +#endif + + for (int i = 0; i < 5; i++) { + SCFree(packets[i]); + } + + DefragDestroy(); + + PASS; +} + #endif /* UNITTESTS */ void DefragRegisterTests(void) @@ -2527,23 +3138,17 @@ void DefragRegisterTests(void) UtRegisterTest("DefragSturgesNovakFirstTest", DefragSturgesNovakFirstTest); UtRegisterTest("DefragSturgesNovakLastTest", DefragSturgesNovakLastTest); - UtRegisterTest("DefragIPv4NoDataTest", DefragIPv4NoDataTest); - UtRegisterTest("DefragIPv4TooLargeTest", DefragIPv4TooLargeTest); - - UtRegisterTest("IPV6DefragInOrderSimpleTest", IPV6DefragInOrderSimpleTest); - UtRegisterTest("IPV6DefragReverseSimpleTest", IPV6DefragReverseSimpleTest); - UtRegisterTest("IPV6DefragSturgesNovakBsdTest", - IPV6DefragSturgesNovakBsdTest); - UtRegisterTest("IPV6DefragSturgesNovakLinuxTest", - IPV6DefragSturgesNovakLinuxTest); - UtRegisterTest("IPV6DefragSturgesNovakWindowsTest", - IPV6DefragSturgesNovakWindowsTest); - UtRegisterTest("IPV6DefragSturgesNovakSolarisTest", - IPV6DefragSturgesNovakSolarisTest); - UtRegisterTest("IPV6DefragSturgesNovakFirstTest", - IPV6DefragSturgesNovakFirstTest); - UtRegisterTest("IPV6DefragSturgesNovakLastTest", - IPV6DefragSturgesNovakLastTest); + UtRegisterTest("DefragNoDataIpv4Test", DefragNoDataIpv4Test); + UtRegisterTest("DefragTooLargeIpv4Test", DefragTooLargeIpv4Test); + + UtRegisterTest("DefragInOrderSimpleIpv6Test", 
DefragInOrderSimpleIpv6Test); + UtRegisterTest("DefragReverseSimpleIpv6Test", DefragReverseSimpleIpv6Test); + UtRegisterTest("DefragSturgesNovakBsdIpv6Test", DefragSturgesNovakBsdIpv6Test); + UtRegisterTest("DefragSturgesNovakLinuxIpv6Test", DefragSturgesNovakLinuxIpv6Test); + UtRegisterTest("DefragSturgesNovakWindowsIpv6Test", DefragSturgesNovakWindowsIpv6Test); + UtRegisterTest("DefragSturgesNovakSolarisIpv6Test", DefragSturgesNovakSolarisIpv6Test); + UtRegisterTest("DefragSturgesNovakFirstIpv6Test", DefragSturgesNovakFirstIpv6Test); + UtRegisterTest("DefragSturgesNovakLastIpv6Test", DefragSturgesNovakLastIpv6Test); UtRegisterTest("DefragVlanTest", DefragVlanTest); UtRegisterTest("DefragVlanQinQTest", DefragVlanQinQTest); @@ -2555,5 +3160,18 @@ void DefragRegisterTests(void) UtRegisterTest("DefragTestBadProto", DefragTestBadProto); UtRegisterTest("DefragTestJeremyLinux", DefragTestJeremyLinux); + + UtRegisterTest("DefragBsdFragmentAfterNoMfIpv4Test", DefragBsdFragmentAfterNoMfIpv4Test); + UtRegisterTest("DefragBsdFragmentAfterNoMfIpv6Test", DefragBsdFragmentAfterNoMfIpv6Test); + UtRegisterTest("DefragBsdSubsequentOverlapsStartOfOriginalIpv4Test", + DefragBsdSubsequentOverlapsStartOfOriginalIpv4Test); + UtRegisterTest("DefragBsdSubsequentOverlapsStartOfOriginalIpv6Test", + DefragBsdSubsequentOverlapsStartOfOriginalIpv6Test); + UtRegisterTest("DefragBsdSubsequentOverlapsStartOfOriginalIpv4Test_2", + DefragBsdSubsequentOverlapsStartOfOriginalIpv4Test_2); + UtRegisterTest("DefragBsdSubsequentOverlapsStartOfOriginalIpv6Test_2", + DefragBsdSubsequentOverlapsStartOfOriginalIpv6Test_2); + UtRegisterTest("DefragBsdMissingFragmentIpv4Test", DefragBsdMissingFragmentIpv4Test); + UtRegisterTest("DefragBsdMissingFragmentIpv6Test", DefragBsdMissingFragmentIpv6Test); #endif /* UNITTESTS */ } diff --git a/src/defrag.h b/src/defrag.h index 11e6a619b2f1..93fe872c023a 100644 --- a/src/defrag.h +++ b/src/defrag.h @@ -106,6 +106,7 @@ typedef struct DefragTracker_ { Address src_addr; 
/**< Source address for this tracker. */ Address dst_addr; /**< Destination address for this tracker. */ + int datalink; /**< datalink for reassembled packet, set by first fragment */ SCTime_t timeout; /**< When this tracker will timeout. */ uint32_t host_timeout; /**< Host timeout, statically assigned from the yaml */ diff --git a/src/detect-bsize.c b/src/detect-bsize.c index 3b3efe87b7ff..8d8b34ea4be5 100644 --- a/src/detect-bsize.c +++ b/src/detect-bsize.c @@ -157,20 +157,6 @@ int DetectBsizeMatch(const SigMatchCtx *ctx, const uint64_t buffer_size, bool eo return 0; } -/** - * \brief This function is used to parse bsize options passed via bsize: keyword - * - * \param bsizestr Pointer to the user provided bsize options - * - * \retval bsized pointer to DetectU64Data on success - * \retval NULL on failure - */ - -static DetectU64Data *DetectBsizeParse(const char *str) -{ - return DetectU64Parse(str); -} - static int SigParseGetMaxBsize(DetectU64Data *bsz) { switch (bsz->mode) { @@ -208,9 +194,9 @@ static int DetectBsizeSetup (DetectEngineCtx *de_ctx, Signature *s, const char * if (list == DETECT_SM_LIST_NOTSET) SCReturnInt(-1); - DetectU64Data *bsz = DetectBsizeParse(sizestr); + DetectU64Data *bsz = DetectU64Parse(sizestr); if (bsz == NULL) - goto error; + SCReturnInt(-1); sm = SigMatchAlloc(); if (sm == NULL) diff --git a/src/detect-byte-extract.c b/src/detect-byte-extract.c index ec9b27fc6406..81bf37a45525 100644 --- a/src/detect-byte-extract.c +++ b/src/detect-byte-extract.c @@ -3519,8 +3519,7 @@ static int DetectByteExtractTest53(void) goto end; } bjd = (DetectBytejumpData *)sm->ctx; - if (bjd->flags != DETECT_CONTENT_OFFSET_VAR || - bjd->offset != 0) { + if (bjd->flags != DETECT_BYTEJUMP_OFFSET_VAR || bjd->offset != 0) { printf("three failed\n"); result = 0; goto end; @@ -3624,8 +3623,7 @@ static int DetectByteExtractTest54(void) goto end; } bjd = (DetectBytejumpData *)sm->ctx; - if (bjd->flags != DETECT_CONTENT_OFFSET_VAR || - bjd->offset != 0) { + if 
(bjd->flags != DETECT_BYTEJUMP_OFFSET_VAR || bjd->offset != 0) { printf("three failed\n"); result = 0; goto end; @@ -3637,8 +3635,7 @@ static int DetectByteExtractTest54(void) goto end; } bjd = (DetectBytejumpData *)sm->ctx; - if (bjd->flags != DETECT_CONTENT_OFFSET_VAR || - bjd->offset != 1) { + if (bjd->flags != DETECT_BYTEJUMP_OFFSET_VAR || bjd->offset != 1) { printf("four failed\n"); result = 0; goto end; @@ -4171,8 +4168,7 @@ static int DetectByteExtractTest58(void) goto end; } bjd = (DetectBytejumpData *)sm->ctx; - if (bjd->flags != DETECT_CONTENT_OFFSET_VAR || - bjd->offset != 0) { + if (bjd->flags != DETECT_BYTEJUMP_OFFSET_VAR || bjd->offset != 0) { printf("three failed\n"); result = 0; goto end; @@ -4184,8 +4180,7 @@ static int DetectByteExtractTest58(void) goto end; } bjd = (DetectBytejumpData *)sm->ctx; - if (bjd->flags != DETECT_CONTENT_OFFSET_VAR || - bjd->offset != 1) { + if (bjd->flags != DETECT_BYTEJUMP_OFFSET_VAR || bjd->offset != 1) { printf("four failed\n"); result = 0; goto end; @@ -4304,8 +4299,7 @@ static int DetectByteExtractTest59(void) goto end; } bjd = (DetectBytejumpData *)sm->ctx; - if (bjd->flags != DETECT_CONTENT_OFFSET_VAR || - bjd->offset != 0) { + if (bjd->flags != DETECT_BYTEJUMP_OFFSET_VAR || bjd->offset != 0) { printf("three failed\n"); result = 0; goto end; @@ -4317,8 +4311,7 @@ static int DetectByteExtractTest59(void) goto end; } bjd = (DetectBytejumpData *)sm->ctx; - if (bjd->flags != DETECT_CONTENT_OFFSET_VAR || - bjd->offset != 1) { + if (bjd->flags != DETECT_BYTEJUMP_OFFSET_VAR || bjd->offset != 1) { printf("four failed\n"); result = 0; goto end; diff --git a/src/detect-bytejump.c b/src/detect-bytejump.c index b0b034774636..c20e49d9c5b7 100644 --- a/src/detect-bytejump.c +++ b/src/detect-bytejump.c @@ -564,7 +564,7 @@ static int DetectBytejumpSetup(DetectEngineCtx *de_ctx, Signature *s, const char goto error; } data->offset = index; - data->flags |= DETECT_CONTENT_OFFSET_VAR; + data->flags |= DETECT_BYTEJUMP_OFFSET_VAR; 
SCFree(offset); offset = NULL; } diff --git a/src/detect-bytejump.h b/src/detect-bytejump.h index f8ee530b3864..15f610344320 100644 --- a/src/detect-bytejump.h +++ b/src/detect-bytejump.h @@ -41,6 +41,7 @@ #define DETECT_BYTEJUMP_OFFSET_BE BIT_U16(7) /**< "byte extract" enabled */ #define DETECT_BYTEJUMP_END BIT_U16(8) /**< "from_end" jump */ #define DETECT_BYTEJUMP_NBYTES_VAR BIT_U16(9) /**< nbytes string*/ +#define DETECT_BYTEJUMP_OFFSET_VAR BIT_U16(10) /**< byte extract value enabled */ typedef struct DetectBytejumpData_ { uint8_t nbytes; /**< Number of bytes to compare */ diff --git a/src/detect-content.c b/src/detect-content.c index 5bbe9e9b3cae..be1f12559491 100644 --- a/src/detect-content.c +++ b/src/detect-content.c @@ -574,10 +574,21 @@ static void PropagateLimits(Signature *s, SigMatch *sm_head) SCLogDebug("stored: offset %u depth %u offset_plus_pat %u " "has_active_depth_chain %s", offset, depth, offset_plus_pat, has_active_depth_chain ? "true" : "false"); - if (cd->flags & DETECT_CONTENT_DISTANCE && cd->distance >= 0) { - VALIDATE((uint32_t)offset_plus_pat + cd->distance <= UINT16_MAX); - offset = cd->offset = (uint16_t)(offset_plus_pat + cd->distance); - SCLogDebug("updated content to have offset %u", cd->offset); + if (cd->flags & DETECT_CONTENT_DISTANCE) { + if (cd->distance >= 0) { + VALIDATE((uint32_t)offset_plus_pat + cd->distance <= UINT16_MAX); + offset = cd->offset = (uint16_t)(offset_plus_pat + cd->distance); + SCLogDebug("distance %d: updated content to have offset %u", cd->distance, + cd->offset); + } else { + if (abs(cd->distance) > offset_plus_pat) + offset = cd->offset = 0; + else + offset = cd->offset = (uint16_t)(offset_plus_pat + cd->distance); + offset_plus_pat = offset + cd->content_len; + SCLogDebug("distance %d: updated content to have offset %u", cd->distance, + cd->offset); + } } if (has_active_depth_chain) { if (offset_plus_pat && cd->flags & DETECT_CONTENT_WITHIN && cd->within >= 0) { diff --git a/src/detect-dataset.c 
b/src/detect-dataset.c index 3d2964605e6e..69eaf811133c 100644 --- a/src/detect-dataset.c +++ b/src/detect-dataset.c @@ -407,10 +407,6 @@ int DetectDatasetSetup (DetectEngineCtx *de_ctx, Signature *s, const char *rawst SCLogError("failed to set up dataset '%s'.", name); return -1; } - if (set->hash && SC_ATOMIC_GET(set->hash->memcap_reached)) { - SCLogError("dataset too large for set memcap"); - return -1; - } cd = SCCalloc(1, sizeof(DetectDatasetData)); if (unlikely(cd == NULL)) diff --git a/src/detect-dsize.c b/src/detect-dsize.c index 4336e3546b9f..7dd6b4012e3b 100644 --- a/src/detect-dsize.c +++ b/src/detect-dsize.c @@ -124,7 +124,7 @@ static int DetectDsizeSetup (DetectEngineCtx *de_ctx, Signature *s, const char * if (DetectGetLastSMFromLists(s, DETECT_DSIZE, -1)) { SCLogError("Can't use 2 or more dsizes in " "the same sig. Invalidating signature."); - goto error; + return -1; } SCLogDebug("\'%s\'", rawstr); @@ -132,7 +132,7 @@ static int DetectDsizeSetup (DetectEngineCtx *de_ctx, Signature *s, const char * dd = DetectU16Parse(rawstr); if (dd == NULL) { SCLogError("Parsing \'%s\' failed", rawstr); - goto error; + return -1; } /* Okay so far so good, lets get this into a SigMatch @@ -141,7 +141,7 @@ static int DetectDsizeSetup (DetectEngineCtx *de_ctx, Signature *s, const char * if (sm == NULL){ SCLogError("Failed to allocate memory for SigMatch"); rs_detect_u16_free(dd); - goto error; + return -1; } sm->type = DETECT_DSIZE; @@ -160,9 +160,6 @@ static int DetectDsizeSetup (DetectEngineCtx *de_ctx, Signature *s, const char * } return 0; - -error: - return -1; } /** diff --git a/src/detect-engine-address.c b/src/detect-engine-address.c index ac10e142dedc..191e8f504d09 100644 --- a/src/detect-engine-address.c +++ b/src/detect-engine-address.c @@ -1362,23 +1362,28 @@ void DetectAddressMapFree(DetectEngineCtx *de_ctx) return; } -static int DetectAddressMapAdd(DetectEngineCtx *de_ctx, const char *string, - DetectAddressHead *address, bool contains_negation) +static 
bool DetectAddressMapAdd(DetectEngineCtx *de_ctx, const char *string, + DetectAddressHead *address, bool contains_negation) { DetectAddressMap *map = SCCalloc(1, sizeof(*map)); if (map == NULL) - return -1; + return false; map->string = SCStrdup(string); if (map->string == NULL) { SCFree(map); - return -1; + return false; } map->address = address; map->contains_negation = contains_negation; - BUG_ON(HashListTableAdd(de_ctx->address_table, (void *)map, 0) != 0); - return 0; + if (HashListTableAdd(de_ctx->address_table, map, 0) != 0) { + SCFree(map->string); + SCFree(map); + return false; + } + + return true; } static const DetectAddressMap *DetectAddressMapLookup(DetectEngineCtx *de_ctx, @@ -1471,8 +1476,11 @@ const DetectAddressHead *DetectParseAddress(DetectEngineCtx *de_ctx, *contains_negation = false; } - DetectAddressMapAdd((DetectEngineCtx *)de_ctx, string, head, - *contains_negation); + if (!DetectAddressMapAdd((DetectEngineCtx *)de_ctx, string, head, *contains_negation)) { + DetectAddressHeadFree(head); + return NULL; + } + return head; } diff --git a/src/detect-engine-alert.c b/src/detect-engine-alert.c index f9cbed1564c5..466ca4570376 100644 --- a/src/detect-engine-alert.c +++ b/src/detect-engine-alert.c @@ -208,7 +208,7 @@ static void PacketApplySignatureActions(Packet *p, const Signature *s, const Pac // nothing to set in the packet } else if (pa->action & (ACTION_ALERT | ACTION_CONFIG)) { // nothing to set in the packet - } else { + } else if (pa->action != 0) { DEBUG_VALIDATE_BUG_ON(1); // should be unreachable } @@ -272,7 +272,7 @@ static inline PacketAlert PacketAlertSet( pa.s = (Signature *)s; pa.flags = alert_flags; /* Set tx_id if the frame has it */ - pa.tx_id = (tx_id == UINT64_MAX) ? 0 : tx_id; + pa.tx_id = tx_id; pa.frame_id = (alert_flags & PACKET_ALERT_FLAG_FRAME) ? 
det_ctx->frame_id : 0; return pa; } @@ -317,10 +317,15 @@ static int AlertQueueSortHelper(const void *a, const void *b) { const PacketAlert *pa0 = a; const PacketAlert *pa1 = b; - if (pa1->num == pa0->num) + if (pa1->num == pa0->num) { + if (pa1->tx_id == PACKET_ALERT_NOTX) { + return -1; + } else if (pa0->tx_id == PACKET_ALERT_NOTX) { + return 1; + } return pa0->tx_id < pa1->tx_id ? 1 : -1; - else - return pa0->num > pa1->num ? 1 : -1; + } + return pa0->num > pa1->num ? 1 : -1; } /** \internal @@ -372,10 +377,7 @@ void PacketAlertFinalize(DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx qsort(det_ctx->alert_queue, det_ctx->alert_queue_size, sizeof(PacketAlert), AlertQueueSortHelper); - uint16_t i = 0; - uint16_t max_pos = det_ctx->alert_queue_size; - - while (i < max_pos) { + for (uint16_t i = 0; i < det_ctx->alert_queue_size; i++) { PacketAlert *pa = &det_ctx->alert_queue[i]; const Signature *s = de_ctx->sig_array[pa->num]; int res = PacketAlertHandle(de_ctx, det_ctx, s, p, pa); @@ -407,22 +409,30 @@ void PacketAlertFinalize(DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx } /* Thresholding removes this alert */ - if (res == 0 || res == 2 || (s->flags & SIG_FLAG_NOALERT)) { + if (res == 0 || res == 2 || (s->action & (ACTION_ALERT | ACTION_PASS)) == 0) { + SCLogDebug("sid:%u: skipping alert because of thresholding (res=%d) or NOALERT (%02x)", + s->id, res, s->action); /* we will not copy this to the AlertQueue */ p->alerts.suppressed++; } else if (p->alerts.cnt < packet_alert_max) { p->alerts.alerts[p->alerts.cnt] = *pa; SCLogDebug("Appending sid %" PRIu32 " alert to Packet::alerts at pos %u", s->id, i); - /* pass "alert" found, we're done */ - if (pa->action & ACTION_PASS) { + /* pass w/o alert found, we're done. Alert is not logged. */ + if ((pa->action & (ACTION_PASS | ACTION_ALERT)) == ACTION_PASS) { + SCLogDebug("sid:%u: is a pass rule, so break out of loop", s->id); break; } p->alerts.cnt++; + + /* pass with alert, we're done. Alert is logged. 
*/ + if (pa->action & ACTION_PASS) { + SCLogDebug("sid:%u: is a pass rule, so break out of loop", s->id); + break; + } } else { p->alerts.discarded++; } - i++; } /* At this point, we should have all the new alerts. Now check the tag diff --git a/src/detect-engine-analyzer.c b/src/detect-engine-analyzer.c index a37afabb0f00..212d04d46cd0 100644 --- a/src/detect-engine-analyzer.c +++ b/src/detect-engine-analyzer.c @@ -27,6 +27,7 @@ #include "suricata-common.h" #include "suricata.h" #include "rust.h" +#include "action-globals.h" #include "detect.h" #include "detect-parse.h" #include "detect-engine.h" @@ -701,10 +702,10 @@ static void DumpContent(JsonBuilder *js, const DetectContentData *cd) jb_set_uint(js, "depth", cd->depth); } if (cd->flags & DETECT_CONTENT_DISTANCE) { - jb_set_uint(js, "distance", cd->distance); + jb_set_int(js, "distance", cd->distance); } if (cd->flags & DETECT_CONTENT_WITHIN) { - jb_set_uint(js, "within", cd->within); + jb_set_int(js, "within", cd->within); } jb_set_bool(js, "fast_pattern", cd->flags & DETECT_CONTENT_FAST_PATTERN); jb_set_bool(js, "relative_next", cd->flags & DETECT_CONTENT_RELATIVE_NEXT); @@ -777,9 +778,9 @@ static void DumpMatches(RuleAnalyzer *ctx, JsonBuilder *js, const SigMatchData * jb_open_object(js, "byte_jump"); jb_set_uint(js, "nbytes", cd->nbytes); - jb_set_uint(js, "offset", cd->offset); + jb_set_int(js, "offset", cd->offset); jb_set_uint(js, "multiplier", cd->multiplier); - jb_set_uint(js, "post_offset", cd->post_offset); + jb_set_int(js, "post_offset", cd->post_offset); switch (cd->base) { case DETECT_BYTEJUMP_BASE_UNSET: jb_set_string(js, "base", "unset"); @@ -822,7 +823,7 @@ static void DumpMatches(RuleAnalyzer *ctx, JsonBuilder *js, const SigMatchData * jb_open_object(js, "byte_test"); jb_set_uint(js, "nbytes", cd->nbytes); - jb_set_uint(js, "offset", cd->offset); + jb_set_int(js, "offset", cd->offset); switch (cd->base) { case DETECT_BYTETEST_BASE_UNSET: jb_set_string(js, "base", "unset"); @@ -964,7 +965,7 @@ 
void EngineAnalysisRules2(const DetectEngineCtx *de_ctx, const Signature *s) if (s->flags & SIG_FLAG_DP_ANY) { jb_append_string(ctx.js, "dp_any"); } - if (s->flags & SIG_FLAG_NOALERT) { + if ((s->action & ACTION_ALERT) == 0) { jb_append_string(ctx.js, "noalert"); } if (s->flags & SIG_FLAG_DSIZE) { diff --git a/src/detect-engine-build.c b/src/detect-engine-build.c index 8b7621271983..c34a8adaadfb 100644 --- a/src/detect-engine-build.c +++ b/src/detect-engine-build.c @@ -37,7 +37,9 @@ #include "detect-flow.h" #include "detect-config.h" #include "detect-flowbits.h" +#include "app-layer-events.h" +#include "util-port-interval-tree.h" #include "util-profiling.h" #include "util-validate.h" #include "util-var-name.h" @@ -409,7 +411,8 @@ PacketCreateMask(Packet *p, SignatureMask *mask, AppProto alproto, (*mask) |= SIG_MASK_REQUIRE_NO_PAYLOAD; } - if (p->events.cnt > 0 || app_decoder_events != 0 || p->app_layer_events != NULL) { + if (p->events.cnt > 0 || app_decoder_events != 0 || + (p->app_layer_events != NULL && p->app_layer_events->cnt)) { SCLogDebug("packet/flow has events set"); (*mask) |= SIG_MASK_REQUIRE_ENGINE_EVENT; } @@ -550,9 +553,12 @@ static int SignatureCreateMask(Signature *s) } break; } + case DETECT_DECODE_EVENT: + // fallthrough + case DETECT_STREAM_EVENT: + // fallthrough case DETECT_AL_APP_LAYER_EVENT: - s->mask |= SIG_MASK_REQUIRE_ENGINE_EVENT; - break; + // fallthrough case DETECT_ENGINE_EVENT: s->mask |= SIG_MASK_REQUIRE_ENGINE_EVENT; break; @@ -1101,8 +1107,9 @@ static int PortIsWhitelisted(const DetectEngineCtx *de_ctx, w = de_ctx->udp_whitelist; while (w) { - if (a->port >= w->port && a->port2 <= w->port) { - SCLogDebug("port group %u:%u whitelisted -> %d", a->port, a->port2, w->port); + /* Make sure the whitelist port falls in the port range of a */ + DEBUG_VALIDATE_BUG_ON(a->port > a->port2); + if (a->port == w->port && w->port2 == a->port2) { return 1; } w = w->next; @@ -1152,8 +1159,349 @@ static int RuleSetWhitelist(Signature *s) return wl; } 
-int CreateGroupedPortList(DetectEngineCtx *de_ctx, DetectPort *port_list, DetectPort **newhead, uint32_t unique_groups, int (*CompareFunc)(DetectPort *, DetectPort *), uint32_t max_idx); -int CreateGroupedPortListCmpCnt(DetectPort *a, DetectPort *b); +static int SortCompare(const void *a, const void *b) +{ + const DetectPort *pa = *(const DetectPort **)a; + const DetectPort *pb = *(const DetectPort **)b; + + if (pa->sh->init->whitelist < pb->sh->init->whitelist) { + return 1; + } else if (pa->sh->init->whitelist > pb->sh->init->whitelist) { + return -1; + } + + if (pa->sh->init->sig_cnt < pb->sh->init->sig_cnt) { + return 1; + } else if (pa->sh->init->sig_cnt > pb->sh->init->sig_cnt) { + return -1; + } + + /* Hack to make the qsort output deterministic across platforms. + * This had to be done because the order of equal elements sorted + * by qsort is undeterministic and showed different output on BSD, + * MacOS and Windows. Sorting based on id makes it deterministic. */ + if (pa->sh->id < pb->sh->id) + return -1; + + return 1; +} + +static inline void SortGroupList(uint32_t *groups, DetectPort **list, uint32_t max_idx, + int (*CompareFunc)(const void *, const void *)) +{ + int cnt = 0; + for (DetectPort *x = *list; x != NULL; x = x->next) { + DEBUG_VALIDATE_BUG_ON(x->port > x->port2); + cnt++; + } + if (cnt <= 1) + return; + + /* build temporary array to sort with qsort */ + DetectPort **array = (DetectPort **)SCCalloc(cnt, sizeof(DetectPort *)); + if (array == NULL) + return; + + int idx = 0; + for (DetectPort *x = *list; x != NULL;) { + /* assign a temporary id to resolve otherwise equal groups */ + x->sh->id = idx + 1; + SigGroupHeadSetSigCnt(x->sh, max_idx); + DetectPort *next = x->next; + x->next = x->prev = x->last = NULL; + DEBUG_VALIDATE_BUG_ON(x->port > x->port2); + array[idx++] = x; + x = next; + } + DEBUG_VALIDATE_BUG_ON(cnt != idx); + + qsort(array, idx, sizeof(DetectPort *), SortCompare); + + /* rebuild the list based on the qsort-ed array */ + 
DetectPort *new_list = NULL, *tail = NULL; + for (int i = 0; i < idx; i++) { + DetectPort *p = array[i]; + /* unset temporary group id */ + p->sh->id = 0; + + if (new_list == NULL) { + new_list = p; + } + if (tail != NULL) { + tail->next = p; + } + p->prev = tail; + tail = p; + } + + *list = new_list; + *groups = idx; + +#if DEBUG + int dbgcnt = 0; + SCLogDebug("SORTED LIST:"); + for (DetectPort *tmp = *list; tmp != NULL; tmp = tmp->next) { + SCLogDebug("item:= [%u:%u]; whitelist: %d; sig_cnt: %d", tmp->port, tmp->port2, + tmp->sh->init->whitelist, tmp->sh->init->sig_cnt); + dbgcnt++; + BUG_ON(dbgcnt > cnt); + } +#endif + SCFree(array); +} +/** \internal + * \brief Create a list of DetectPort objects sorted based on CompareFunc's + * logic. + * + * List can limit the number of groups. In this case an extra "join" group + * is created that contains the sigs belonging to that. It's *appended* to + * the list, meaning that if the list is walked linearly it's found last. + * The joingr is meant to be a catch all. + * + */ +static int CreateGroupedPortList(DetectEngineCtx *de_ctx, DetectPort *port_list, + DetectPort **newhead, uint32_t unique_groups, uint32_t max_idx, + int (*CompareFunc)(const void *, const void *)) +{ + DetectPort *tmplist = NULL, *joingr = NULL; + uint32_t groups = 0; + + /* insert the ports into the tmplist, where it will + * be sorted descending on 'cnt' and on whether a group + * is whitelisted. */ + tmplist = port_list; + SortGroupList(&groups, &tmplist, max_idx, SortCompare); + uint32_t left = unique_groups; + if (left == 0) + left = groups; + + /* create another list: take the port groups from above + * and add them to the 2nd list until we have met our + * count. The rest is added to the 'join' group. 
*/ + DetectPort *tmplist2 = NULL, *tmplist2_tail = NULL; + DetectPort *gr, *next_gr; + for (gr = tmplist; gr != NULL;) { + next_gr = gr->next; + + SCLogDebug("temp list gr %p %u:%u", gr, gr->port, gr->port2); + DetectPortPrint(gr); + + /* if we've set up all the unique groups, add the rest to the + * catch-all joingr */ + if (left == 0) { + if (joingr == NULL) { + DetectPortParse(de_ctx, &joingr, "0:65535"); + if (joingr == NULL) { + goto error; + } + SCLogDebug("joingr => %u-%u", joingr->port, joingr->port2); + joingr->next = NULL; + } + SigGroupHeadCopySigs(de_ctx, gr->sh, &joingr->sh); + + /* when a group's sigs are added to the joingr, we can free it */ + gr->next = NULL; + DetectPortFree(de_ctx, gr); + /* append */ + } else { + gr->next = NULL; + + if (tmplist2 == NULL) { + tmplist2 = gr; + tmplist2_tail = gr; + } else { + tmplist2_tail->next = gr; + tmplist2_tail = gr; + } + } + + if (left > 0) + left--; + + gr = next_gr; + } + + /* if present, append the joingr that covers the rest */ + if (joingr != NULL) { + SCLogDebug("appending joingr %p %u:%u", joingr, joingr->port, joingr->port2); + + if (tmplist2 == NULL) { + tmplist2 = joingr; + // tmplist2_tail = joingr; + } else { + tmplist2_tail->next = joingr; + // tmplist2_tail = joingr; + } + } else { + SCLogDebug("no joingr"); + } + + /* pass back our new list to the caller */ + *newhead = tmplist2; + DetectPortPrintList(*newhead); + + return 0; +error: + return -1; +} + +#define UNDEFINED_PORT 0 +#define RANGE_PORT 1 +#define SINGLE_PORT 2 + +typedef struct UniquePortPoint_ { + uint16_t port; /* value of the port */ + bool single; /* is the port single or part of a range */ +} UniquePortPoint; + +/** + * \brief Function to set unique port points. Consider all the ports + * flattened out on one line, set the points that correspond + * to a valid port. Also store whether the port point stored + * was a single port or part of a range. 
+ * + * \param p Port object to be set + * \param unique_list List of unique port points to be updated + * \param size_list Current size of the list + * + * \return Updated size of the list + */ +static inline uint32_t SetUniquePortPoints( + const DetectPort *p, uint8_t *unique_list, uint32_t size_list) +{ + if (unique_list[p->port] == UNDEFINED_PORT) { + if (p->port == p->port2) { + unique_list[p->port] = SINGLE_PORT; + } else { + unique_list[p->port] = RANGE_PORT; + } + size_list++; + } else if (((unique_list[p->port] == SINGLE_PORT) && (p->port != p->port2)) || + ((unique_list[p->port] == RANGE_PORT) && (p->port == p->port2))) { + if ((p->port != UINT16_MAX) && (unique_list[p->port + 1] == UNDEFINED_PORT)) { + unique_list[p->port + 1] = RANGE_PORT; + size_list++; + } + } + + /* Treat right boundary as single point to avoid creating unneeded + * ranges later on */ + if (unique_list[p->port2] == UNDEFINED_PORT) { + size_list++; + } + unique_list[p->port2] = SINGLE_PORT; + return size_list; +} + +/** + * \brief Function to set the *final* unique port points and save them + * for later use. The points are already sorted because of the way + * they have been retrieved and saved earlier for use at this point. 
+ * + * \param unique_list List of the unique port points to be used + * \param size_unique_arr Number of unique port points + * \param final_arr List of the final unique port points to be created + */ +static inline void SetFinalUniquePortPoints( + const uint8_t *unique_list, const uint32_t size_unique_arr, UniquePortPoint *final_arr) +{ + for (uint32_t i = 0, j = 0; i < (UINT16_MAX + 1); i++) { + DEBUG_VALIDATE_BUG_ON(j > size_unique_arr); + if (unique_list[i] == RANGE_PORT) { + final_arr[j].port = (uint16_t)i; + final_arr[j++].single = false; + } else if (unique_list[i] == SINGLE_PORT) { + final_arr[j].port = (uint16_t)i; + final_arr[j++].single = true; + } + } +} + +/** + * \brief Function to create the list of ports with the smallest ranges + * by resolving overlaps and end point conditions. These contain the + * correct SGHs as well after going over the interval tree to find + * any range overlaps. + * + * \param de_ctx Detection Engine Context + * \param unique_list Final list of unique port points + * \param size_list Size of the unique_list + * \param it Pointer to the interval tree + * \param list Pointer to the list where final ports will be stored + * + * \return 0 on success, -1 otherwise + */ +static inline int CreatePortList(DetectEngineCtx *de_ctx, const uint8_t *unique_list, + const uint32_t size_list, SCPortIntervalTree *it, DetectPort **list) +{ + /* Only do the operations if there is at least one unique port */ + if (size_list == 0) + return 0; + UniquePortPoint *final_unique_points = + (UniquePortPoint *)SCCalloc(size_list, sizeof(UniquePortPoint)); + if (final_unique_points == NULL) + return -1; + SetFinalUniquePortPoints(unique_list, size_list, final_unique_points); + /* Handle edge case when there is just one unique port */ + if (size_list == 1) { + SCPortIntervalFindOverlappingRanges( + de_ctx, final_unique_points[0].port, final_unique_points[0].port, &it->tree, list); + } else { + UniquePortPoint *p1 = &final_unique_points[0]; + 
UniquePortPoint *p2 = &final_unique_points[1]; + uint16_t port = p1 ? p1->port : 0; // just for cppcheck + uint16_t port2 = p2->port; + for (uint32_t i = 1; i < size_list; i++) { + DEBUG_VALIDATE_BUG_ON(port > port2); + if ((p1 && p1->single) && p2->single) { + SCPortIntervalFindOverlappingRanges(de_ctx, port, port, &it->tree, list); + SCPortIntervalFindOverlappingRanges(de_ctx, port2, port2, &it->tree, list); + port = port2 + 1; + } else if (p1 && p1->single) { + SCPortIntervalFindOverlappingRanges(de_ctx, port, port, &it->tree, list); + if ((port2 > port + 1)) { + SCPortIntervalFindOverlappingRanges( + de_ctx, port + 1, port2 - 1, &it->tree, list); + port = port2; + } else { + port = port + 1; + } + } else if (p2->single) { + /* If port2 is boundary and less or equal to port + 1, create a range + * keeping the boundary away as it is single port */ + if ((port2 >= port + 1)) { + SCPortIntervalFindOverlappingRanges(de_ctx, port, port2 - 1, &it->tree, list); + } + /* Deal with port2 as it is a single port */ + SCPortIntervalFindOverlappingRanges(de_ctx, port2, port2, &it->tree, list); + port = port2 + 1; + } else { + if ((port2 > port + 1)) { + SCPortIntervalFindOverlappingRanges(de_ctx, port, port2 - 1, &it->tree, list); + port = port2; + } else { + SCPortIntervalFindOverlappingRanges(de_ctx, port, port2, &it->tree, list); + port = port2 + 1; + } + } + /* if the current port matches the p2->port, assign it to p1 so that + * there is a UniquePortPoint object to check other info like whether + * the port with this value is single */ + if (port == p2->port) { + p1 = p2; + } else { + p1 = NULL; + } + if (i + 1 < size_list) { + p2 = &final_unique_points[i + 1]; + port2 = p2->port; + } + } + } + /* final_unique_points array is no longer needed */ + SCFree(final_unique_points); + return 0; +} static DetectPort *RulesGroupByPorts(DetectEngineCtx *de_ctx, uint8_t ipproto, uint32_t direction) { @@ -1163,8 +1511,14 @@ static DetectPort *RulesGroupByPorts(DetectEngineCtx 
*de_ctx, uint8_t ipproto, u DetectPortHashInit(de_ctx); uint32_t max_idx = 0; + uint32_t size_unique_port_arr = 0; const Signature *s = de_ctx->sig_list; DetectPort *list = NULL; + + uint8_t *unique_port_points = (uint8_t *)SCCalloc(UINT16_MAX + 1, sizeof(uint8_t)); + if (unique_port_points == NULL) + return NULL; + while (s) { /* IP Only rules are handled separately */ if (s->type == SIG_TYPE_IPONLY) @@ -1216,6 +1570,8 @@ static DetectPort *RulesGroupByPorts(DetectEngineCtx *de_ctx, uint8_t ipproto, u SigGroupHeadAppendSig(de_ctx, &tmp2->sh, s); tmp2->sh->init->whitelist = pwl; DetectPortHashAdd(de_ctx, tmp2); + size_unique_port_arr = + SetUniquePortPoints(tmp2, unique_port_points, size_unique_port_arr); } p = p->next; @@ -1225,18 +1581,34 @@ static DetectPort *RulesGroupByPorts(DetectEngineCtx *de_ctx, uint8_t ipproto, u s = s->next; } - /* step 2: create a list of DetectPort objects */ + /* step 2: create a list of the smallest port ranges with + * appropriate SGHs */ + + /* Create an interval tree of all the given ports to make the search + * for overlaps later on easier */ + SCPortIntervalTree *it = SCPortIntervalTreeInit(); + if (it == NULL) + goto error; + HashListTableBucket *htb = NULL; - for (htb = HashListTableGetListHead(de_ctx->dport_hash_table); - htb != NULL; - htb = HashListTableGetListNext(htb)) - { + for (htb = HashListTableGetListHead(de_ctx->dport_hash_table); htb != NULL; + htb = HashListTableGetListNext(htb)) { DetectPort *p = HashListTableGetListData(htb); - DetectPort *tmp = DetectPortCopySingle(de_ctx, p); - BUG_ON(tmp == NULL); - int r = DetectPortInsert(de_ctx, &list , tmp); - BUG_ON(r == -1); + if (SCPortIntervalInsert(de_ctx, it, p) != SC_OK) { + SCLogDebug("Port was not inserted in the tree"); + goto error; + } } + + /* Create a sorted list of ports in ascending order after resolving overlaps + * and corresponding SGHs */ + if (CreatePortList(de_ctx, unique_port_points, size_unique_port_arr, it, &list) < 0) + goto error; + + /* 
unique_port_points array is no longer needed */ + SCFree(unique_port_points); + + /* Port hashes are no longer needed */ DetectPortHashFree(de_ctx); de_ctx->dport_hash_table = NULL; @@ -1246,7 +1618,7 @@ static DetectPort *RulesGroupByPorts(DetectEngineCtx *de_ctx, uint8_t ipproto, u DetectPort *newlist = NULL; uint16_t groupmax = (direction == SIG_FLAG_TOCLIENT) ? de_ctx->max_uniq_toclient_groups : de_ctx->max_uniq_toserver_groups; - CreateGroupedPortList(de_ctx, list, &newlist, groupmax, CreateGroupedPortListCmpCnt, max_idx); + CreateGroupedPortList(de_ctx, list, &newlist, groupmax, max_idx, SortCompare); list = newlist; /* step 4: deduplicate the SGH's */ @@ -1294,7 +1666,16 @@ static DetectPort *RulesGroupByPorts(DetectEngineCtx *de_ctx, uint8_t ipproto, u ipproto == 6 ? "TCP" : "UDP", direction == SIG_FLAG_TOSERVER ? "toserver" : "toclient", cnt, own, ref); + SCPortIntervalTreeFree(de_ctx, it); return list; + +error: + if (unique_port_points != NULL) + SCFree(unique_port_points); + if (it != NULL) + SCPortIntervalTreeFree(de_ctx, it); + + return NULL; } void SignatureSetType(DetectEngineCtx *de_ctx, Signature *s) @@ -1515,179 +1896,6 @@ int SigAddressPrepareStage1(DetectEngineCtx *de_ctx) return -1; } -static int PortGroupWhitelist(const DetectPort *a) -{ - return a->sh->init->whitelist; -} - -int CreateGroupedPortListCmpCnt(DetectPort *a, DetectPort *b) -{ - if (PortGroupWhitelist(a) && !PortGroupWhitelist(b)) { - SCLogDebug("%u:%u (cnt %u, wl %d) wins against %u:%u (cnt %u, wl %d)", a->port, a->port2, - a->sh->init->sig_cnt, PortGroupWhitelist(a), b->port, b->port2, - b->sh->init->sig_cnt, PortGroupWhitelist(b)); - return 1; - } else if (!PortGroupWhitelist(a) && PortGroupWhitelist(b)) { - SCLogDebug("%u:%u (cnt %u, wl %d) loses against %u:%u (cnt %u, wl %d)", a->port, a->port2, - a->sh->init->sig_cnt, PortGroupWhitelist(a), b->port, b->port2, - b->sh->init->sig_cnt, PortGroupWhitelist(b)); - return 0; - } else if (PortGroupWhitelist(a) > 
PortGroupWhitelist(b)) { - SCLogDebug("%u:%u (cnt %u, wl %d) wins against %u:%u (cnt %u, wl %d)", a->port, a->port2, - a->sh->init->sig_cnt, PortGroupWhitelist(a), b->port, b->port2, - b->sh->init->sig_cnt, PortGroupWhitelist(b)); - return 1; - } else if (PortGroupWhitelist(a) == PortGroupWhitelist(b)) { - if (a->sh->init->sig_cnt > b->sh->init->sig_cnt) { - SCLogDebug("%u:%u (cnt %u, wl %d) wins against %u:%u (cnt %u, wl %d)", a->port, - a->port2, a->sh->init->sig_cnt, PortGroupWhitelist(a), b->port, b->port2, - b->sh->init->sig_cnt, PortGroupWhitelist(b)); - return 1; - } - } - - SCLogDebug("%u:%u (cnt %u, wl %d) loses against %u:%u (cnt %u, wl %d)", a->port, a->port2, - a->sh->init->sig_cnt, PortGroupWhitelist(a), b->port, b->port2, b->sh->init->sig_cnt, - PortGroupWhitelist(b)); - return 0; -} - -/** \internal - * \brief Create a list of DetectPort objects sorted based on CompareFunc's - * logic. - * - * List can limit the number of groups. In this case an extra "join" group - * is created that contains the sigs belonging to that. It's *appended* to - * the list, meaning that if the list is walked linearly it's found last. - * The joingr is meant to be a catch all. - * - */ -int CreateGroupedPortList(DetectEngineCtx *de_ctx, DetectPort *port_list, DetectPort **newhead, uint32_t unique_groups, int (*CompareFunc)(DetectPort *, DetectPort *), uint32_t max_idx) -{ - DetectPort *tmplist = NULL, *joingr = NULL; - char insert = 0; - uint32_t groups = 0; - DetectPort *list; - - /* insert the addresses into the tmplist, where it will - * be sorted descending on 'cnt' and on whether a group - * is whitelisted. 
*/ - - DetectPort *oldhead = port_list; - while (oldhead) { - /* take the top of the list */ - list = oldhead; - oldhead = oldhead->next; - list->next = NULL; - - groups++; - - SigGroupHeadSetSigCnt(list->sh, max_idx); - - /* insert it */ - DetectPort *tmpgr = tmplist, *prevtmpgr = NULL; - if (tmplist == NULL) { - /* empty list, set head */ - tmplist = list; - } else { - /* look for the place to insert */ - for ( ; tmpgr != NULL && !insert; tmpgr = tmpgr->next) { - if (CompareFunc(list, tmpgr) == 1) { - if (tmpgr == tmplist) { - list->next = tmplist; - tmplist = list; - SCLogDebug("new list top: %u:%u", tmplist->port, tmplist->port2); - } else { - list->next = prevtmpgr->next; - prevtmpgr->next = list; - } - insert = 1; - break; - } - prevtmpgr = tmpgr; - } - if (insert == 0) { - list->next = NULL; - prevtmpgr->next = list; - } - insert = 0; - } - } - - uint32_t left = unique_groups; - if (left == 0) - left = groups; - - /* create another list: take the port groups from above - * and add them to the 2nd list until we have met our - * count. The rest is added to the 'join' group. 
*/ - DetectPort *tmplist2 = NULL, *tmplist2_tail = NULL; - DetectPort *gr, *next_gr; - for (gr = tmplist; gr != NULL; ) { - next_gr = gr->next; - - SCLogDebug("temp list gr %p %u:%u", gr, gr->port, gr->port2); - DetectPortPrint(gr); - - /* if we've set up all the unique groups, add the rest to the - * catch-all joingr */ - if (left == 0) { - if (joingr == NULL) { - DetectPortParse(de_ctx, &joingr, "0:65535"); - if (joingr == NULL) { - goto error; - } - SCLogDebug("joingr => %u-%u", joingr->port, joingr->port2); - joingr->next = NULL; - } - SigGroupHeadCopySigs(de_ctx,gr->sh,&joingr->sh); - - /* when a group's sigs are added to the joingr, we can free it */ - gr->next = NULL; - DetectPortFree(de_ctx, gr); - /* append */ - } else { - gr->next = NULL; - - if (tmplist2 == NULL) { - tmplist2 = gr; - tmplist2_tail = gr; - } else { - tmplist2_tail->next = gr; - tmplist2_tail = gr; - } - } - - if (left > 0) - left--; - - gr = next_gr; - } - - /* if present, append the joingr that covers the rest */ - if (joingr != NULL) { - SCLogDebug("appending joingr %p %u:%u", joingr, joingr->port, joingr->port2); - - if (tmplist2 == NULL) { - tmplist2 = joingr; - //tmplist2_tail = joingr; - } else { - tmplist2_tail->next = joingr; - //tmplist2_tail = joingr; - } - } else { - SCLogDebug("no joingr"); - } - - /* pass back our new list to the caller */ - *newhead = tmplist2; - DetectPortPrintList(*newhead); - - return 0; -error: - return -1; -} - /** * \internal * \brief add a decoder event signature to the detection engine ctx diff --git a/src/detect-engine-content-inspection.c b/src/detect-engine-content-inspection.c index 8c5feb61a226..06b18283fb52 100644 --- a/src/detect-engine-content-inspection.c +++ b/src/detect-engine-content-inspection.c @@ -515,7 +515,7 @@ uint8_t DetectEngineContentInspection(DetectEngineCtx *de_ctx, DetectEngineThrea int32_t offset = bjd->offset; int32_t nbytes; - if (bjflags & DETECT_CONTENT_OFFSET_VAR) { + if (bjflags & DETECT_BYTEJUMP_OFFSET_VAR) { offset = 
det_ctx->byte_values[offset]; } diff --git a/src/detect-engine-iponly.c b/src/detect-engine-iponly.c index 03b464982178..b163277d671d 100644 --- a/src/detect-engine-iponly.c +++ b/src/detect-engine-iponly.c @@ -82,16 +82,78 @@ static IPOnlyCIDRItem *IPOnlyCIDRItemNew(void) SCReturnPtr(item, "IPOnlyCIDRItem"); } -static uint8_t IPOnlyCIDRItemCompare(IPOnlyCIDRItem *head, - IPOnlyCIDRItem *item) +/** + * \brief Compares two list items + * + * \retval An integer less than, equal to, or greater than zero if lhs is + * considered to be respectively less than, equal to, or greater than + * rhs. + */ +static int IPOnlyCIDRItemCompareReal(const IPOnlyCIDRItem *lhs, const IPOnlyCIDRItem *rhs) { - uint8_t i = 0; - for (; i < head->netmask / 32 || i < 1; i++) { - if (item->ip[i] < head->ip[i]) - //if (*(uint8_t *)(item->ip + i) < *(uint8_t *)(head->ip + i)) - return 1; + if (lhs->netmask == rhs->netmask) { + uint8_t i = 0; + for (; i < lhs->netmask / 32 || i < 1; i++) { + if (lhs->ip[i] < rhs->ip[i]) + return -1; + if (lhs->ip[i] > rhs->ip[i]) + return 1; + } + return 0; } - return 0; + + return lhs->netmask < rhs->netmask ? 
-1 : 1; +} + +static int IPOnlyCIDRItemCompare(const void *lhsv, const void *rhsv) +{ + const IPOnlyCIDRItem *lhs = *(const IPOnlyCIDRItem **)lhsv; + const IPOnlyCIDRItem *rhs = *(const IPOnlyCIDRItem **)rhsv; + + return IPOnlyCIDRItemCompareReal(lhs, rhs); +} + +static void IPOnlyCIDRListQSort(IPOnlyCIDRItem **head) +{ + if (unlikely(head == NULL || *head == NULL)) + return; + + // First count the number of elements in the list + size_t len = 0; + IPOnlyCIDRItem *curr = *head; + + while (curr) { + curr = curr->next; + len++; + } + + // Place a pointer to the list item in an array for sorting + IPOnlyCIDRItem **tmp = SCMalloc(len * sizeof(IPOnlyCIDRItem *)); + + if (unlikely(tmp == NULL)) { + SCLogError("Failed to allocate enough memory to sort IP-only CIDR items."); + return; + } + + curr = *head; + for (size_t i = 0; i < len; i++) { + tmp[i] = curr; + curr = curr->next; + } + + // Perform the sort using the qsort algorithm + qsort(tmp, len, sizeof(IPOnlyCIDRItem *), IPOnlyCIDRItemCompare); + + // Update the links to the next element + *head = tmp[0]; + + for (size_t i = 0; i + 1 < len; i++) { + tmp[i]->next = tmp[i + 1]; + } + + tmp[len - 1]->next = NULL; + + SCFree(tmp); } //declaration for using it already @@ -349,11 +411,9 @@ static int IPOnlyCIDRItemSetup(IPOnlyCIDRItem **gh, char *s) return -1; } - /** * \brief This function insert a IPOnlyCIDRItem - * to a list of IPOnlyCIDRItems sorted by netmask - * ascending + * to a list of IPOnlyCIDRItems * \param head Pointer to the head of IPOnlyCIDRItems list * \param item Pointer to the item to insert in the list * @@ -362,37 +422,12 @@ static int IPOnlyCIDRItemSetup(IPOnlyCIDRItem **gh, char *s) static IPOnlyCIDRItem *IPOnlyCIDRItemInsertReal(IPOnlyCIDRItem *head, IPOnlyCIDRItem *item) { - IPOnlyCIDRItem *it, *prev = NULL; - if (item == NULL) return head; - /* Compare with the head */ - if (item->netmask < head->netmask || (item->netmask == head->netmask && IPOnlyCIDRItemCompare(head, item))) { - item->next = 
head; - return item; - } - - if (item->netmask == head->netmask && !IPOnlyCIDRItemCompare(head, item)) { - item->next = head->next; - head->next = item; - return head; - } - - for (prev = it = head; - it != NULL && it->netmask < item->netmask; - it = it->next) - prev = it; - - if (it == NULL) { - prev->next = item; - item->next = NULL; - } else { - item->next = it; - prev->next = item; - } - - return head; + /* Always insert item as head */ + item->next = head; + return item; } /** @@ -1112,6 +1147,9 @@ void IPOnlyPrepare(DetectEngineCtx *de_ctx) IPOnlyCIDRListPrint((de_ctx->io_ctx).ip_dst); */ + IPOnlyCIDRListQSort(&(de_ctx->io_ctx).ip_src); + IPOnlyCIDRListQSort(&(de_ctx->io_ctx).ip_dst); + IPOnlyCIDRItem *src, *dst; SCRadixNode *node = NULL; @@ -1729,64 +1767,124 @@ static int IPOnlyTestSig03 (void) static int IPOnlyTestSig04 (void) { int result = 1; - IPOnlyCIDRItem *head = NULL; - IPOnlyCIDRItem *new; - new = IPOnlyCIDRItemNew(); - new->netmask= 10; + // Test a linked list of size 0, 1, 2, ..., 5 + for (int size = 0; size < 6; size++) { + IPOnlyCIDRItem *new = NULL; - head = IPOnlyCIDRItemInsert(head, new); + if (size > 0) { + new = IPOnlyCIDRItemNew(); + new->netmask = 10; + new->ip[0] = 3; - new = IPOnlyCIDRItemNew(); - new->netmask= 11; + head = IPOnlyCIDRItemInsert(head, new); + } - head = IPOnlyCIDRItemInsert(head, new); + if (size > 1) { + new = IPOnlyCIDRItemNew(); + new->netmask = 11; - new = IPOnlyCIDRItemNew(); - new->netmask= 9; + head = IPOnlyCIDRItemInsert(head, new); + } - head = IPOnlyCIDRItemInsert(head, new); + if (size > 2) { + new = IPOnlyCIDRItemNew(); + new->netmask = 9; - new = IPOnlyCIDRItemNew(); - new->netmask= 10; + head = IPOnlyCIDRItemInsert(head, new); + } - head = IPOnlyCIDRItemInsert(head, new); + if (size > 3) { + new = IPOnlyCIDRItemNew(); + new->netmask = 10; + new->ip[0] = 1; - new = IPOnlyCIDRItemNew(); - new->netmask= 10; + head = IPOnlyCIDRItemInsert(head, new); + } - head = IPOnlyCIDRItemInsert(head, new); + if (size > 4) 
{ + new = IPOnlyCIDRItemNew(); + new->netmask = 10; + new->ip[0] = 2; - IPOnlyCIDRListPrint(head); - new = head; - if (new->netmask != 9) { - result = 0; - goto end; - } - new = new->next; - if (new->netmask != 10) { - result = 0; - goto end; - } - new = new->next; - if (new->netmask != 10) { - result = 0; - goto end; - } - new = new->next; - if (new->netmask != 10) { - result = 0; - goto end; - } - new = new->next; - if (new->netmask != 11) { - result = 0; - goto end; + head = IPOnlyCIDRItemInsert(head, new); + } + + IPOnlyCIDRListPrint(head); + + IPOnlyCIDRListQSort(&head); + + if (size == 0) { + if (head != NULL) { + result = 0; + goto end; + } + } + + /** + * Validate the following list entries for each size + * 1 - 10 + * 2 - 10<3> 11 + * 3 - 9 10<3> 11 + * 4 - 9 10<1> 10<3> 11 + * 5 - 9 10<1> 10<2> 10<3> 11 + */ + new = head; + if (size >= 3) { + if (new->netmask != 9) { + result = 0; + goto end; + } + new = new->next; + } + + if (size >= 4) { + if (new->netmask != 10 || new->ip[0] != 1) { + result = 0; + goto end; + } + new = new->next; + } + + if (size >= 5) { + if (new->netmask != 10 || new->ip[0] != 2) { + result = 0; + goto end; + } + new = new->next; + } + + if (size >= 1) { + if (new->netmask != 10 || new->ip[0] != 3) { + result = 0; + goto end; + } + new = new->next; + } + + if (size >= 2) { + if (new->netmask != 11) { + result = 0; + goto end; + } + new = new->next; + } + + if (new != NULL) { + result = 0; + goto end; + } + + IPOnlyCIDRListFree(head); + head = NULL; } end: - IPOnlyCIDRListFree(head); + if (head) { + IPOnlyCIDRListFree(head); + head = NULL; + } return result; } diff --git a/src/detect-engine-loader.c b/src/detect-engine-loader.c index 3ef29b9b40f1..0cdb453388ab 100644 --- a/src/detect-engine-loader.c +++ b/src/detect-engine-loader.c @@ -44,6 +44,8 @@ #include "util-threshold-config.h" #include "util-path.h" +#include "rust.h" + #ifdef HAVE_GLOB_H #include #endif @@ -109,17 +111,18 @@ char *DetectLoadCompleteSigPath(const 
DetectEngineCtx *de_ctx, const char *sig_f * \param badsigs_tot Will store number of invalid signatures in the file * \retval 0 on success, -1 on error */ -static int DetectLoadSigFile(DetectEngineCtx *de_ctx, char *sig_file, - int *goodsigs, int *badsigs) +static int DetectLoadSigFile( + DetectEngineCtx *de_ctx, char *sig_file, int *goodsigs, int *badsigs, int *skippedsigs) { Signature *sig = NULL; - int good = 0, bad = 0; + int good = 0, bad = 0, skipped = 0; char line[DETECT_MAX_RULE_SIZE] = ""; size_t offset = 0; int lineno = 0, multiline = 0; (*goodsigs) = 0; (*badsigs) = 0; + (*skippedsigs) = 0; FILE *fp = fopen(sig_file, "r"); if (fp == NULL) { @@ -196,6 +199,12 @@ static int DetectLoadSigFile(DetectEngineCtx *de_ctx, char *sig_file, if (!de_ctx->sigerror_ok) { bad++; } + if (de_ctx->sigerror_requires) { + SCLogInfo("Skipping signature due to missing requirements: %s from file %s at line " + "%" PRId32, + line, sig_file, lineno - multiline); + skipped++; + } } multiline = 0; } @@ -203,6 +212,7 @@ static int DetectLoadSigFile(DetectEngineCtx *de_ctx, char *sig_file, *goodsigs = good; *badsigs = bad; + *skippedsigs = skipped; return 0; } @@ -212,8 +222,8 @@ static int DetectLoadSigFile(DetectEngineCtx *de_ctx, char *sig_file, * \param sig_file Filename (or pattern) holding signatures * \retval -1 on error */ -static int ProcessSigFiles(DetectEngineCtx *de_ctx, char *pattern, - SigFileLoaderStat *st, int *good_sigs, int *bad_sigs) +static int ProcessSigFiles(DetectEngineCtx *de_ctx, char *pattern, SigFileLoaderStat *st, + int *good_sigs, int *bad_sigs, int *skipped_sigs) { int r = 0; @@ -246,7 +256,7 @@ static int ProcessSigFiles(DetectEngineCtx *de_ctx, char *pattern, return 0; #endif SCLogConfig("Loading rule file: %s", fname); - r = DetectLoadSigFile(de_ctx, fname, good_sigs, bad_sigs); + r = DetectLoadSigFile(de_ctx, fname, good_sigs, bad_sigs, skipped_sigs); if (r < 0) { ++(st->bad_files); } @@ -255,6 +265,7 @@ static int ProcessSigFiles(DetectEngineCtx 
*de_ctx, char *pattern, st->good_sigs_total += *good_sigs; st->bad_sigs_total += *bad_sigs; + st->skipped_sigs_total += *skipped_sigs; #ifdef HAVE_GLOB_H } @@ -282,6 +293,7 @@ int SigLoadSignatures(DetectEngineCtx *de_ctx, char *sig_file, int sig_file_excl char varname[128] = "rule-files"; int good_sigs = 0; int bad_sigs = 0; + int skipped_sigs = 0; if (strlen(de_ctx->config_prefix) > 0) { snprintf(varname, sizeof(varname), "%s.rule-files", @@ -303,8 +315,9 @@ int SigLoadSignatures(DetectEngineCtx *de_ctx, char *sig_file, int sig_file_excl else { TAILQ_FOREACH(file, &rule_files->head, next) { sfile = DetectLoadCompleteSigPath(de_ctx, file->val); - good_sigs = bad_sigs = 0; - ret = ProcessSigFiles(de_ctx, sfile, sig_stat, &good_sigs, &bad_sigs); + good_sigs = bad_sigs = skipped_sigs = 0; + ret = ProcessSigFiles( + de_ctx, sfile, sig_stat, &good_sigs, &bad_sigs, &skipped_sigs); SCFree(sfile); if (de_ctx->failure_fatal && ret != 0) { @@ -323,7 +336,7 @@ int SigLoadSignatures(DetectEngineCtx *de_ctx, char *sig_file, int sig_file_excl /* If a Signature file is specified from command-line, parse it too */ if (sig_file != NULL) { - ret = ProcessSigFiles(de_ctx, sig_file, sig_stat, &good_sigs, &bad_sigs); + ret = ProcessSigFiles(de_ctx, sig_file, sig_stat, &good_sigs, &bad_sigs, &skipped_sigs); if (ret != 0) { if (de_ctx->failure_fatal) { @@ -347,8 +360,14 @@ int SigLoadSignatures(DetectEngineCtx *de_ctx, char *sig_file, int sig_file_excl } } else { /* we report the total of files and rules successfully loaded and failed */ - SCLogInfo("%" PRId32 " rule files processed. %" PRId32 " rules successfully loaded, %" PRId32 " rules failed", - sig_stat->total_files, sig_stat->good_sigs_total, sig_stat->bad_sigs_total); + SCLogInfo("%" PRId32 " rule files processed. 
%" PRId32 + " rules successfully loaded, %" PRId32 " rules failed, %" PRId32, + sig_stat->total_files, sig_stat->good_sigs_total, sig_stat->bad_sigs_total, + sig_stat->skipped_sigs_total); + if (de_ctx->requirements != NULL && sig_stat->skipped_sigs_total > 0) { + SCDetectRequiresStatusLog(de_ctx->requirements, PROG_VER, + strlen(de_ctx->config_prefix) > 0 ? de_ctx->tenant_id : 0); + } } if ((sig_stat->bad_sigs_total || sig_stat->bad_files) && de_ctx->failure_fatal) { @@ -437,6 +456,12 @@ int DetectLoadersSync(void) done = true; } SCMutexUnlock(&loader->m); + if (!done) { + /* nudge thread in case it's sleeping */ + SCCtrlMutexLock(loader->tv->ctrl_mutex); + pthread_cond_broadcast(loader->tv->ctrl_cond); + SCCtrlMutexUnlock(loader->tv->ctrl_mutex); + } } SCMutexLock(&loader->m); if (loader->result != 0) { @@ -492,7 +517,9 @@ static void TmThreadWakeupDetectLoaderThreads(void) while (tv != NULL) { if (strncmp(tv->name,"DL#",3) == 0) { BUG_ON(tv->ctrl_cond == NULL); + SCCtrlMutexLock(tv->ctrl_mutex); pthread_cond_broadcast(tv->ctrl_cond); + SCCtrlMutexUnlock(tv->ctrl_mutex); } tv = tv->next; } @@ -536,6 +563,9 @@ static TmEcode DetectLoaderThreadInit(ThreadVars *t, const void *initdata, void /* pass thread data back to caller */ *data = ftd; + DetectLoaderControl *loader = &loaders[ftd->instance]; + loader->tv = t; + return TM_ECODE_OK; } diff --git a/src/detect-engine-loader.h b/src/detect-engine-loader.h index 7ffb8c8648a0..f43ff9a5491c 100644 --- a/src/detect-engine-loader.h +++ b/src/detect-engine-loader.h @@ -43,9 +43,14 @@ typedef struct DetectLoaderTask_ { typedef struct DetectLoaderControl_ { int id; - int result; /* 0 for ok, error otherwise */ - SCMutex m; - TAILQ_HEAD(, DetectLoaderTask_) task_list; + ThreadVars *tv; /**< loader threads threadvars - for waking them up */ + + /** struct to group members and mutex */ + struct { + SCMutex m; /**< mutex protects result and task_list */ + int result; /**< 0 for ok, error otherwise */ + TAILQ_HEAD(, 
DetectLoaderTask_) task_list; + }; } DetectLoaderControl; int DetectLoaderQueueTask(int loader_id, LoaderFunc Func, void *func_ctx, LoaderFreeFunc FreeFunc); diff --git a/src/detect-engine-mpm.c b/src/detect-engine-mpm.c index f091a3dadaa0..ede3e594d927 100644 --- a/src/detect-engine-mpm.c +++ b/src/detect-engine-mpm.c @@ -772,18 +772,12 @@ int SignatureHasPacketContent(const Signature *s) { SCEnter(); - if (s == NULL) { - SCReturnInt(0); - } - if (!(s->proto.proto[IPPROTO_TCP / 8] & 1 << (IPPROTO_TCP % 8))) { SCReturnInt(1); } - if ((s->init_data != NULL && s->init_data->smlists[DETECT_SM_LIST_PMATCH] == NULL) || - (s->init_data == NULL && s->sm_arrays[DETECT_SM_LIST_PMATCH] == NULL)) - { - SCLogDebug("no mpm"); + if (s->init_data->smlists[DETECT_SM_LIST_PMATCH] == NULL) { + SCLogDebug("no PMATCH"); SCReturnInt(0); } @@ -808,18 +802,12 @@ int SignatureHasStreamContent(const Signature *s) { SCEnter(); - if (s == NULL) { - SCReturnInt(0); - } - if (!(s->proto.proto[IPPROTO_TCP / 8] & 1 << (IPPROTO_TCP % 8))) { SCReturnInt(0); } - if ((s->init_data != NULL && s->init_data->smlists[DETECT_SM_LIST_PMATCH] == NULL) || - (s->init_data == NULL && s->sm_arrays[DETECT_SM_LIST_PMATCH] == NULL)) - { - SCLogDebug("no mpm"); + if (s->init_data->smlists[DETECT_SM_LIST_PMATCH] == NULL) { + SCLogDebug("no PMATCH"); SCReturnInt(0); } diff --git a/src/detect-engine-port.c b/src/detect-engine-port.c index 04d2a49d557f..4768df993db8 100644 --- a/src/detect-engine-port.c +++ b/src/detect-engine-port.c @@ -64,7 +64,7 @@ static bool DetectPortIsValidRange(char *, uint16_t *); * * \retval dp newly created DetectPort on success; or NULL in case of error. 
*/ -static DetectPort *DetectPortInit(void) +DetectPort *DetectPortInit(void) { DetectPort *dp = SCCalloc(1, sizeof(DetectPort)); if (unlikely(dp == NULL)) @@ -173,7 +173,6 @@ int DetectPortInsert(DetectEngineCtx *de_ctx, DetectPort **head, SCLogDebug("PORT_EQ %p %p", cur, new); /* exact overlap/match */ if (cur != new) { - SigGroupHeadCopySigs(de_ctx, new->sh, &cur->sh); DetectPortFree(de_ctx, new); return 0; } @@ -303,9 +302,6 @@ static int DetectPortCut(DetectEngineCtx *de_ctx, DetectPort *a, tmp_c->port = a_port2 + 1; tmp_c->port2 = b_port2; - SigGroupHeadCopySigs(de_ctx,b->sh,&tmp_c->sh); /* copy old b to c */ - SigGroupHeadCopySigs(de_ctx,a->sh,&b->sh); /* copy a to b */ - /** * We have 3 parts: [bbb[baba]aaa] * part a: b_port1 <-> a_port1 - 1 @@ -329,19 +325,6 @@ static int DetectPortCut(DetectEngineCtx *de_ctx, DetectPort *a, tmp_c->port = b_port2 + 1; tmp_c->port2 = a_port2; - /** - * 'a' gets clean and then 'b' sigs - * 'b' gets clean, then 'a' then 'b' sigs - * 'c' gets 'a' sigs - */ - SigGroupHeadCopySigs(de_ctx,a->sh,&tmp->sh); /* store old a list */ - SigGroupHeadClearSigs(a->sh); /* clean a list */ - SigGroupHeadCopySigs(de_ctx,tmp->sh,&tmp_c->sh); /* copy old b to c */ - SigGroupHeadCopySigs(de_ctx,b->sh,&a->sh); /* copy old b to a */ - SigGroupHeadCopySigs(de_ctx,tmp->sh,&b->sh);/* prepend old a before b */ - - SigGroupHeadClearSigs(tmp->sh); /* clean tmp list */ - /** * We have 2 or three parts: * @@ -368,10 +351,6 @@ static int DetectPortCut(DetectEngineCtx *de_ctx, DetectPort *a, b->port = a_port2 + 1; b->port2 = b_port2; - - /** 'b' overlaps 'a' so 'a' needs the 'b' sigs */ - SigGroupHeadCopySigs(de_ctx,b->sh,&a->sh); - } else if (a_port2 == b_port2) { SCLogDebug("2"); a->port = b_port1; @@ -379,18 +358,6 @@ static int DetectPortCut(DetectEngineCtx *de_ctx, DetectPort *a, b->port = a_port1; b->port2 = a_port2; - - /* [bbb[baba]] will be transformed into - * [aaa][bbb] - * steps: copy b sigs to tmp - * a overlaps b, so copy a to b - * clear a - 
* copy tmp to a */ - SigGroupHeadCopySigs(de_ctx,b->sh,&tmp->sh); /* store old a list */ - SigGroupHeadCopySigs(de_ctx,a->sh,&b->sh); - SigGroupHeadClearSigs(a->sh); /* clean a list */ - SigGroupHeadCopySigs(de_ctx,tmp->sh,&a->sh);/* merge old a with b */ - SigGroupHeadClearSigs(tmp->sh); /* clean tmp list */ } else { SCLogDebug("3"); a->port = b_port1; @@ -407,19 +374,6 @@ static int DetectPortCut(DetectEngineCtx *de_ctx, DetectPort *a, tmp_c->port = a_port2 + 1; tmp_c->port2 = b_port2; - - /** - * 'a' gets clean and then 'b' sigs - * 'b' gets clean, then 'a' then 'b' sigs - * 'c' gets 'b' sigs - */ - SigGroupHeadCopySigs(de_ctx,a->sh,&tmp->sh); /* store old a list */ - SigGroupHeadClearSigs(a->sh); /* clean a list */ - SigGroupHeadCopySigs(de_ctx,b->sh,&tmp_c->sh); /* copy old b to c */ - SigGroupHeadCopySigs(de_ctx,b->sh,&a->sh); /* copy old b to a */ - SigGroupHeadCopySigs(de_ctx,tmp->sh,&b->sh);/* merge old a with b */ - - SigGroupHeadClearSigs(tmp->sh); /* clean tmp list */ } /** * We have 2 or three parts: @@ -447,15 +401,6 @@ static int DetectPortCut(DetectEngineCtx *de_ctx, DetectPort *a, b->port = b_port2 + 1; b->port2 = a_port2; - - /** 'b' overlaps 'a' so 'a' needs the 'b' sigs */ - SigGroupHeadCopySigs(de_ctx,b->sh,&tmp->sh); - SigGroupHeadClearSigs(b->sh); - SigGroupHeadCopySigs(de_ctx,a->sh,&b->sh); - SigGroupHeadCopySigs(de_ctx,tmp->sh,&a->sh); - - SigGroupHeadClearSigs(tmp->sh); - } else if (a_port2 == b_port2) { SCLogDebug("2"); @@ -465,9 +410,6 @@ static int DetectPortCut(DetectEngineCtx *de_ctx, DetectPort *a, b->port = b_port1; b->port2 = b_port2; - /** 'a' overlaps 'b' so 'b' needs the 'a' sigs */ - SigGroupHeadCopySigs(de_ctx,a->sh,&b->sh); - } else { SCLogDebug("3"); a->port = a_port1; @@ -484,9 +426,6 @@ static int DetectPortCut(DetectEngineCtx *de_ctx, DetectPort *a, tmp_c->port = b_port2 + 1; tmp_c->port2 = a_port2; - - SigGroupHeadCopySigs(de_ctx,a->sh,&b->sh); - SigGroupHeadCopySigs(de_ctx,a->sh,&tmp_c->sh); } } @@ -1994,162 +1933,6 @@ 
static int PortTestFunctions04(void) return result; } -/** - * \test Test general functions - */ -static int PortTestFunctions05(void) -{ - DetectPort *dp1 = NULL; - DetectPort *dp2 = NULL; - DetectPort *dp3 = NULL; - int result = 0; - int r = 0; - - DetectEngineCtx *de_ctx = DetectEngineCtxInit(); - Signature s[2]; - memset(s,0x00,sizeof(s)); - - s[0].num = 0; - s[1].num = 1; - - r = DetectPortParse(NULL, &dp1, "1024:65535"); - if (r != 0) { - printf("r != 0 but %d: ", r); - goto end; - } - SigGroupHeadAppendSig(de_ctx, &dp1->sh, &s[0]); - - r = DetectPortParse(NULL, &dp2, "any"); - if (r != 0) { - printf("r != 0 but %d: ", r); - goto end; - } - SigGroupHeadAppendSig(de_ctx, &dp2->sh, &s[1]); - - SCLogDebug("dp1"); - DetectPortPrint(dp1); - SCLogDebug("dp2"); - DetectPortPrint(dp2); - - DetectPortInsert(de_ctx, &dp3, dp1); - DetectPortInsert(de_ctx, &dp3, dp2); - - if (dp3 == NULL) - goto end; - - SCLogDebug("dp3"); - DetectPort *x = dp3; - for ( ; x != NULL; x = x->next) { - DetectPortPrint(x); - //SigGroupHeadPrintSigs(de_ctx, x->sh); - } - - DetectPort *one = dp3; - DetectPort *two = dp3->next; - - int sig = 0; - if ((one->sh->init->sig_array[sig / 8] & (1 << (sig % 8)))) { - printf("sig %d part of 'one', but it shouldn't: ", sig); - goto end; - } - sig = 1; - if (!(one->sh->init->sig_array[sig / 8] & (1 << (sig % 8)))) { - printf("sig %d part of 'one', but it shouldn't: ", sig); - goto end; - } - sig = 1; - if (!(two->sh->init->sig_array[sig / 8] & (1 << (sig % 8)))) { - printf("sig %d part of 'two', but it shouldn't: ", sig); - goto end; - } - - result = 1; -end: - if (dp1 != NULL) - DetectPortFree(NULL, dp1); - if (dp2 != NULL) - DetectPortFree(NULL, dp2); - return result; -} - -/** - * \test Test general functions - */ -static int PortTestFunctions06(void) -{ - DetectPort *dp1 = NULL; - DetectPort *dp2 = NULL; - DetectPort *dp3 = NULL; - int result = 0; - int r = 0; - - DetectEngineCtx *de_ctx = DetectEngineCtxInit(); - Signature s[2]; - 
memset(s,0x00,sizeof(s)); - - s[0].num = 0; - s[1].num = 1; - - r = DetectPortParse(NULL, &dp1, "1024:65535"); - if (r != 0) { - printf("r != 0 but %d: ", r); - goto end; - } - SigGroupHeadAppendSig(de_ctx, &dp1->sh, &s[0]); - - r = DetectPortParse(NULL, &dp2, "any"); - if (r != 0) { - printf("r != 0 but %d: ", r); - goto end; - } - SigGroupHeadAppendSig(de_ctx, &dp2->sh, &s[1]); - - SCLogDebug("dp1"); - DetectPortPrint(dp1); - SCLogDebug("dp2"); - DetectPortPrint(dp2); - - DetectPortInsert(de_ctx, &dp3, dp2); - DetectPortInsert(de_ctx, &dp3, dp1); - - if (dp3 == NULL) - goto end; - - SCLogDebug("dp3"); - DetectPort *x = dp3; - for ( ; x != NULL; x = x->next) { - DetectPortPrint(x); - //SigGroupHeadPrintSigs(de_ctx, x->sh); - } - - DetectPort *one = dp3; - DetectPort *two = dp3->next; - - int sig = 0; - if ((one->sh->init->sig_array[sig / 8] & (1 << (sig % 8)))) { - printf("sig %d part of 'one', but it shouldn't: ", sig); - goto end; - } - sig = 1; - if (!(one->sh->init->sig_array[sig / 8] & (1 << (sig % 8)))) { - printf("sig %d part of 'one', but it shouldn't: ", sig); - goto end; - } - sig = 1; - if (!(two->sh->init->sig_array[sig / 8] & (1 << (sig % 8)))) { - printf("sig %d part of 'two', but it shouldn't: ", sig); - goto end; - } - - result = 1; -end: - if (dp1 != NULL) - DetectPortFree(NULL, dp1); - if (dp2 != NULL) - DetectPortFree(NULL, dp2); - return result; -} - /** * \test Test general functions */ @@ -2569,8 +2352,6 @@ void DetectPortTests(void) UtRegisterTest("PortTestFunctions02", PortTestFunctions02); UtRegisterTest("PortTestFunctions03", PortTestFunctions03); UtRegisterTest("PortTestFunctions04", PortTestFunctions04); - UtRegisterTest("PortTestFunctions05", PortTestFunctions05); - UtRegisterTest("PortTestFunctions06", PortTestFunctions06); UtRegisterTest("PortTestFunctions07", PortTestFunctions07); UtRegisterTest("PortTestMatchReal01", PortTestMatchReal01); UtRegisterTest("PortTestMatchReal02", PortTestMatchReal02); diff --git 
a/src/detect-engine-port.h b/src/detect-engine-port.h index d27dff3d307f..641a4e238503 100644 --- a/src/detect-engine-port.h +++ b/src/detect-engine-port.h @@ -24,6 +24,34 @@ #ifndef __DETECT_PORT_H__ #define __DETECT_PORT_H__ +#include "interval-tree.h" +#include "detect.h" + +typedef struct SCPortIntervalNode { + uint16_t port; /* low port of a port range */ + uint16_t port2; /* high port of a port range */ + uint16_t max; /* max value of the high port in the subtree rooted at this node */ + + struct SigGroupHead_ *sh; /* SGHs corresponding to this port */ + + IRB_ENTRY(SCPortIntervalNode) irb; /* parent entry of the interval tree */ +} SCPortIntervalNode; + +IRB_HEAD(PI, SCPortIntervalNode); /* head of the interval tree */ +IRB_PROTOTYPE(PI, SCPortIntervalNode, irb, + SCPortIntervalCompare); /* prototype definition of the interval tree */ + +typedef struct SCPortIntervalTree_ { + struct PI tree; + SCPortIntervalNode *head; +} SCPortIntervalTree; + +SCPortIntervalTree *SCPortIntervalTreeInit(void); +void SCPortIntervalTreeFree(DetectEngineCtx *, SCPortIntervalTree *); +int SCPortIntervalInsert(DetectEngineCtx *, SCPortIntervalTree *, const DetectPort *); +void SCPortIntervalFindOverlappingRanges( + DetectEngineCtx *, const uint16_t, const uint16_t, const struct PI *, DetectPort **); + /* prototypes */ int DetectPortParse(const DetectEngineCtx *, DetectPort **head, const char *str); @@ -38,6 +66,7 @@ bool DetectPortListsAreEqual(DetectPort *list1, DetectPort *list2); void DetectPortPrint(DetectPort *); void DetectPortPrintList(DetectPort *head); int DetectPortCmp(DetectPort *, DetectPort *); +DetectPort *DetectPortInit(void); void DetectPortFree(const DetectEngineCtx *de_ctx, DetectPort *); int DetectPortTestConfVars(void); diff --git a/src/detect-engine-register.c b/src/detect-engine-register.c index df6e4a738ffc..62e314cc3f5f 100644 --- a/src/detect-engine-register.c +++ b/src/detect-engine-register.c @@ -114,6 +114,7 @@ #include "detect-rev.h" #include 
"detect-flow.h" #include "detect-flow-age.h" +#include "detect-requires.h" #include "detect-tcp-window.h" #include "detect-ftpbounce.h" #include "detect-isdataat.h" @@ -230,6 +231,7 @@ #include "detect-quic-version.h" #include "detect-quic-cyu-hash.h" #include "detect-quic-cyu-string.h" +#include "detect-ja4-hash.h" #include "detect-bypass.h" #include "detect-ftpdata.h" @@ -237,6 +239,7 @@ #include "detect-transform-compress-whitespace.h" #include "detect-transform-strip-whitespace.h" +#include "detect-transform-strip-pseudo-headers.h" #include "detect-transform-md5.h" #include "detect-transform-sha1.h" #include "detect-transform-sha256.h" @@ -244,6 +247,8 @@ #include "detect-transform-pcrexform.h" #include "detect-transform-urldecode.h" #include "detect-transform-xor.h" +#include "detect-transform-header-lowercase.h" +#include "detect-transform-casechange.h" #include "util-rule-vars.h" @@ -561,6 +566,7 @@ void SigTableSetup(void) DetectReplaceRegister(); DetectFlowRegister(); DetectFlowAgeRegister(); + DetectRequiresRegister(); DetectWindowRegister(); DetectRpcRegister(); DetectFtpbounceRegister(); @@ -683,12 +689,14 @@ void SigTableSetup(void) DetectQuicVersionRegister(); DetectQuicCyuHashRegister(); DetectQuicCyuStringRegister(); + DetectJa4HashRegister(); DetectBypassRegister(); DetectConfigRegister(); DetectTransformCompressWhitespaceRegister(); DetectTransformStripWhitespaceRegister(); + DetectTransformStripPseudoHeadersRegister(); DetectTransformMd5Register(); DetectTransformSha1Register(); DetectTransformSha256Register(); @@ -696,6 +704,9 @@ void SigTableSetup(void) DetectTransformPcrexformRegister(); DetectTransformUrlDecodeRegister(); DetectTransformXorRegister(); + DetectTransformHeaderLowercaseRegister(); + DetectTransformToLowerRegister(); + DetectTransformToUpperRegister(); DetectFileHandlerRegister(); diff --git a/src/detect-engine-register.h b/src/detect-engine-register.h index 7d6c457ef9b0..800d85b59d3c 100644 --- a/src/detect-engine-register.h +++ 
b/src/detect-engine-register.h @@ -87,6 +87,7 @@ enum DetectKeywordId { DETECT_FLOWINT, DETECT_PKTVAR, DETECT_NOALERT, + DETECT_ALERT, DETECT_FLOWBITS, DETECT_HOSTBITS, DETECT_IPV4_CSUM, @@ -111,6 +112,8 @@ enum DetectKeywordId { DETECT_FLOW_AGE, + DETECT_REQUIRES, + DETECT_AL_TLS_VERSION, DETECT_AL_TLS_SUBJECT, DETECT_AL_TLS_ISSUERDN, @@ -314,6 +317,7 @@ enum DetectKeywordId { DETECT_TRANSFORM_COMPRESS_WHITESPACE, DETECT_TRANSFORM_STRIP_WHITESPACE, + DETECT_TRANSFORM_STRIP_PSEUDO_HEADERS, DETECT_TRANSFORM_MD5, DETECT_TRANSFORM_SHA1, DETECT_TRANSFORM_SHA256, @@ -321,6 +325,9 @@ enum DetectKeywordId { DETECT_TRANSFORM_PCREXFORM, DETECT_TRANSFORM_URL_DECODE, DETECT_TRANSFORM_XOR, + DETECT_TRANSFORM_HEADER_LOWERCASE, + DETECT_TRANSFORM_TOLOWER, + DETECT_TRANSFORM_TOUPPER, DETECT_AL_IKE_EXCH_TYPE, DETECT_AL_IKE_SPI_INITIATOR, @@ -332,6 +339,8 @@ enum DetectKeywordId { DETECT_AL_IKE_NONCE, DETECT_AL_IKE_KEY_EXCHANGE, + DETECT_AL_JA4_HASH, + /* make sure this stays last */ DETECT_TBLSIZE, }; diff --git a/src/detect-engine-siggroup.c b/src/detect-engine-siggroup.c index 67af1c115cf4..f47b19234f21 100644 --- a/src/detect-engine-siggroup.c +++ b/src/detect-engine-siggroup.c @@ -48,6 +48,7 @@ #include "util-error.h" #include "util-debug.h" +#include "util-validate.h" #include "util-cidr.h" #include "util-unittest.h" #include "util-unittest-helper.h" @@ -63,7 +64,7 @@ void SigGroupHeadInitDataFree(SigGroupHeadInitData *sghid) sghid->match_array = NULL; } if (sghid->sig_array != NULL) { - SCFree(sghid->sig_array); + SCFreeAligned(sghid->sig_array); sghid->sig_array = NULL; } if (sghid->app_mpms != NULL) { @@ -93,9 +94,12 @@ static SigGroupHeadInitData *SigGroupHeadInitDataAlloc(uint32_t size) memset(sghid, 0x00, sizeof(SigGroupHeadInitData)); /* initialize the signature bitarray */ - sghid->sig_size = size; - if ( (sghid->sig_array = SCMalloc(sghid->sig_size)) == NULL) + size = sghid->sig_size = size + 16 - (size % 16); + void *ptr = SCMallocAligned(sghid->sig_size, 16); + if 
(ptr == NULL) goto error; + memset(ptr, 0, size); + sghid->sig_array = ptr; memset(sghid->sig_array, 0, sghid->sig_size); @@ -379,6 +383,24 @@ int SigGroupHeadClearSigs(SigGroupHead *sgh) return 0; } +#ifdef __SSE2__ +#include +static void MergeBitarrays(const uint8_t *src, uint8_t *dst, const uint32_t size) +{ +#define BYTES 16 + const uint8_t *srcptr = src; + uint8_t *dstptr = dst; + for (uint32_t i = 0; i < size; i += 16) { + __m128i s = _mm_load_si128((const __m128i *)srcptr); + __m128i d = _mm_load_si128((const __m128i *)dstptr); + d = _mm_or_si128(s, d); + _mm_store_si128((__m128i *)dstptr, d); + srcptr += BYTES; + dstptr += BYTES; + } +} +#endif + /** * \brief Copies the bitarray holding the sids from the source SigGroupHead to * the destination SigGroupHead. @@ -392,8 +414,6 @@ int SigGroupHeadClearSigs(SigGroupHead *sgh) */ int SigGroupHeadCopySigs(DetectEngineCtx *de_ctx, SigGroupHead *src, SigGroupHead **dst) { - uint32_t idx = 0; - if (src == NULL || de_ctx == NULL) return 0; @@ -402,11 +422,15 @@ int SigGroupHeadCopySigs(DetectEngineCtx *de_ctx, SigGroupHead *src, SigGroupHea if (*dst == NULL) goto error; } + DEBUG_VALIDATE_BUG_ON(src->init->sig_size != (*dst)->init->sig_size); +#ifdef __SSE2__ + MergeBitarrays(src->init->sig_array, (*dst)->init->sig_array, src->init->sig_size); +#else /* do the copy */ - for (idx = 0; idx < src->init->sig_size; idx++) + for (uint32_t idx = 0; idx < src->init->sig_size; idx++) (*dst)->init->sig_array[idx] = (*dst)->init->sig_array[idx] | src->init->sig_array[idx]; - +#endif if (src->init->whitelist) (*dst)->init->whitelist = MAX((*dst)->init->whitelist, src->init->whitelist); @@ -416,6 +440,24 @@ int SigGroupHeadCopySigs(DetectEngineCtx *de_ctx, SigGroupHead *src, SigGroupHea return -1; } +#ifdef HAVE_POPCNT64 +#include +static uint32_t Popcnt(const uint8_t *array, const uint32_t size) +{ + /* input needs to be a multiple of 8 for u64 casts to work */ + DEBUG_VALIDATE_BUG_ON(size < 8); + DEBUG_VALIDATE_BUG_ON(size % 
8); + + uint32_t cnt = 0; + uint64_t *ptr = (uint64_t *)array; + for (uint64_t idx = 0; idx < size; idx += 8) { + cnt += _popcnt64(*ptr); + ptr++; + } + return cnt; +} +#endif + /** * \brief Updates the SigGroupHead->sig_cnt with the total count of all the * Signatures present in this SigGroupHead. @@ -426,17 +468,41 @@ int SigGroupHeadCopySigs(DetectEngineCtx *de_ctx, SigGroupHead *src, SigGroupHea */ void SigGroupHeadSetSigCnt(SigGroupHead *sgh, uint32_t max_idx) { - uint32_t sig; - - sgh->init->sig_cnt = 0; - for (sig = 0; sig < max_idx + 1; sig++) { +#ifdef HAVE_POPCNT64 + sgh->init->sig_cnt = Popcnt(sgh->init->sig_array, sgh->init->sig_size); +#else + uint32_t cnt = 0; + for (uint32_t sig = 0; sig < max_idx + 1; sig++) { if (sgh->init->sig_array[sig / 8] & (1 << (sig % 8))) - sgh->init->sig_cnt++; + cnt++; } - + sgh->init->sig_cnt = cnt; +#endif return; } +/** + * \brief Finds if two Signature Group Heads are the same. + * + * \param sgha First SGH to be compared + * \param sghb Secornd SGH to be compared + * + * \return true if they're a match, false otherwise + */ +bool SigGroupHeadEqual(const SigGroupHead *sgha, const SigGroupHead *sghb) +{ + if (sgha == NULL || sghb == NULL) + return false; + + if (sgha->init->sig_size != sghb->init->sig_size) + return false; + + if (SCMemcmp(sgha->init->sig_array, sghb->init->sig_array, sgha->init->sig_size) != 0) + return false; + + return true; +} + void SigGroupHeadSetProtoAndDirection(SigGroupHead *sgh, uint8_t ipproto, int dir) { @@ -637,6 +703,8 @@ void SigGroupHeadSetFilestoreCount(DetectEngineCtx *de_ctx, SigGroupHead *sgh) continue; if (SignatureIsFilestoring(s)) { + // should be insured by caller that we do not overflow + DEBUG_VALIDATE_BUG_ON(sgh->filestore_cnt == UINT16_MAX); sgh->filestore_cnt++; } } diff --git a/src/detect-engine-siggroup.h b/src/detect-engine-siggroup.h index d4c9e93c6771..456979aed2fd 100644 --- a/src/detect-engine-siggroup.h +++ b/src/detect-engine-siggroup.h @@ -42,6 +42,7 @@ int 
SigGroupHeadHashRemove(DetectEngineCtx *, SigGroupHead *); void SigGroupHeadInitDataFree(SigGroupHeadInitData *sghid); void SigGroupHeadSetSigCnt(SigGroupHead *sgh, uint32_t max_idx); +bool SigGroupHeadEqual(const SigGroupHead *, const SigGroupHead *); void SigGroupHeadSetProtoAndDirection(SigGroupHead *sgh, uint8_t ipproto, int dir); int SigGroupHeadBuildMatchArray(DetectEngineCtx *de_ctx, SigGroupHead *sgh, uint32_t max_idx); diff --git a/src/detect-engine.c b/src/detect-engine.c index d8f9f1880e56..c86366314513 100644 --- a/src/detect-engine.c +++ b/src/detect-engine.c @@ -1690,8 +1690,8 @@ void InspectionBufferCopy(InspectionBuffer *buffer, uint8_t *buf, uint32_t buf_l * transform may validate that it's compatible with the transform. * * When a transform indicates the byte array is incompatible, none of the - * subsequent transforms, if any, are invoked. This means the first positive - * validation result terminates the loop. + * subsequent transforms, if any, are invoked. This means the first validation + * failure terminates the loop. * * \param de_ctx Detection engine context. * \param sm_list The SM list id. 
@@ -2669,6 +2669,10 @@ void DetectEngineCtxFree(DetectEngineCtx *de_ctx) SCFree(de_ctx->tenant_path); } + if (de_ctx->requirements) { + SCDetectRequiresStatusFree(de_ctx->requirements); + } + SCFree(de_ctx); //DetectAddressGroupPrintMemory(); //DetectSigGroupPrintMemory(); diff --git a/src/detect-fast-pattern.c b/src/detect-fast-pattern.c index b82f3274d709..ef6007a44eca 100644 --- a/src/detect-fast-pattern.c +++ b/src/detect-fast-pattern.c @@ -274,6 +274,9 @@ static int DetectFastPatternSetup(DetectEngineCtx *de_ctx, Signature *s, const c } } } + if (SigMatchListSMBelongsTo(s, pm) == DETECT_SM_LIST_BASE64_DATA) { + SCLogInfo("fast_pattern is ineffective with base64_data"); + } cd->flags |= DETECT_CONTENT_FAST_PATTERN; return 0; } diff --git a/src/detect-file-data.c b/src/detect-file-data.c index e26654e8b9e8..d47087e2fb97 100644 --- a/src/detect-file-data.c +++ b/src/detect-file-data.c @@ -73,7 +73,7 @@ void DetectFiledataRegister(void) sigmatch_table[DETECT_FILE_DATA].name = "file.data"; sigmatch_table[DETECT_FILE_DATA].alias = "file_data"; sigmatch_table[DETECT_FILE_DATA].desc = "make content keywords match on file data"; - sigmatch_table[DETECT_FILE_DATA].url = "/rules/http-keywords.html#file-data"; + sigmatch_table[DETECT_FILE_DATA].url = "/rules/file-keywords.html#file-data"; sigmatch_table[DETECT_FILE_DATA].Setup = DetectFiledataSetup; #ifdef UNITTESTS sigmatch_table[DETECT_FILE_DATA].RegisterTests = DetectFiledataRegisterTests; diff --git a/src/detect-filesize.c b/src/detect-filesize.c index 05caba8568c1..616ff9dbd941 100644 --- a/src/detect-filesize.c +++ b/src/detect-filesize.c @@ -127,7 +127,7 @@ static int DetectFilesizeSetup (DetectEngineCtx *de_ctx, Signature *s, const cha fsd = DetectU64Parse(str); if (fsd == NULL) - goto error; + SCReturnInt(-1); sm = SigMatchAlloc(); if (sm == NULL) diff --git a/src/detect-filestore.c b/src/detect-filestore.c index c53a93d78dd2..c905f9b88d48 100644 --- a/src/detect-filestore.c +++ b/src/detect-filestore.c @@ -118,7 
+118,8 @@ static int FilestorePostMatchWithOptions(Packet *p, Flow *f, const DetectFilesto switch (filestore->direction) { case FILESTORE_DIR_DEFAULT: rule_dir = 1; - break; + // will use both sides if scope is not default + // fallthrough case FILESTORE_DIR_BOTH: toserver_dir = 1; toclient_dir = 1; @@ -160,16 +161,28 @@ static int FilestorePostMatchWithOptions(Packet *p, Flow *f, const DetectFilesto AppLayerTxData *txd = AppLayerParserGetTxData(f->proto, f->alproto, txv); DEBUG_VALIDATE_BUG_ON(txd == NULL); if (txd != NULL) { - txd->file_flags |= FLOWFILE_STORE; + if (toclient_dir) { + txd->file_flags |= FLOWFILE_STORE_TC; + } + if (toserver_dir) { + txd->file_flags |= FLOWFILE_STORE_TS; + } } } } else if (this_flow) { /* set in flow and AppLayerStateData */ - f->file_flags |= FLOWFILE_STORE; - AppLayerStateData *sd = AppLayerParserGetStateData(f->proto, f->alproto, f->alstate); - if (sd != NULL) { - sd->file_flags |= FLOWFILE_STORE; + if (toclient_dir) { + f->file_flags |= FLOWFILE_STORE_TC; + if (sd != NULL) { + sd->file_flags |= FLOWFILE_STORE_TC; + } + } + if (toserver_dir) { + f->file_flags |= FLOWFILE_STORE_TS; + if (sd != NULL) { + sd->file_flags |= FLOWFILE_STORE_TS; + } } } else { FileStoreFileById(fc, file_id); @@ -333,6 +346,11 @@ static int DetectFilestoreSetup (DetectEngineCtx *de_ctx, Signature *s, const ch static bool warn_not_configured = false; static uint32_t de_version = 0; + if (de_ctx->filestore_cnt == UINT16_MAX) { + SCLogError("Cannot have more than 65535 filestore signatures"); + return -1; + } + /* Check on first-time loads (includes following a reload) */ if (!warn_not_configured || (de_ctx->version != de_version)) { if (de_version != de_ctx->version) { @@ -476,6 +494,7 @@ static int DetectFilestoreSetup (DetectEngineCtx *de_ctx, Signature *s, const ch SigMatchAppendSMToList(s, sm, DETECT_SM_LIST_POSTMATCH); s->flags |= SIG_FLAG_FILESTORE; + de_ctx->filestore_cnt++; if (match) pcre2_match_data_free(match); diff --git 
a/src/detect-flowbits.c b/src/detect-flowbits.c index 144eb89f8849..358bd7e0b1d1 100644 --- a/src/detect-flowbits.c +++ b/src/detect-flowbits.c @@ -26,6 +26,7 @@ #include "suricata-common.h" #include "decode.h" +#include "action-globals.h" #include "detect.h" #include "threads.h" #include "flow.h" @@ -286,7 +287,10 @@ int DetectFlowbitSetup (DetectEngineCtx *de_ctx, Signature *s, const char *rawst } if (strcmp(fb_cmd_str,"noalert") == 0) { - fb_cmd = DETECT_FLOWBITS_CMD_NOALERT; + if (strlen(fb_name) != 0) + goto error; + s->action &= ~ACTION_ALERT; + return 0; } else if (strcmp(fb_cmd_str,"isset") == 0) { fb_cmd = DETECT_FLOWBITS_CMD_ISSET; } else if (strcmp(fb_cmd_str,"isnotset") == 0) { @@ -303,11 +307,6 @@ int DetectFlowbitSetup (DetectEngineCtx *de_ctx, Signature *s, const char *rawst } switch (fb_cmd) { - case DETECT_FLOWBITS_CMD_NOALERT: - if (strlen(fb_name) != 0) - goto error; - s->flags |= SIG_FLAG_NOALERT; - return 0; case DETECT_FLOWBITS_CMD_ISNOTSET: case DETECT_FLOWBITS_CMD_ISSET: case DETECT_FLOWBITS_CMD_SET: @@ -347,8 +346,7 @@ int DetectFlowbitSetup (DetectEngineCtx *de_ctx, Signature *s, const char *rawst sm->ctx = (SigMatchCtx *)cd; switch (fb_cmd) { - /* case DETECT_FLOWBITS_CMD_NOALERT can't happen here */ - + /* noalert can't happen here */ case DETECT_FLOWBITS_CMD_ISNOTSET: case DETECT_FLOWBITS_CMD_ISSET: /* checks, so packet list */ @@ -933,7 +931,7 @@ static int FlowBitsTestSig05(void) s = de_ctx->sig_list = SigInit(de_ctx,"alert ip any any -> any any (msg:\"Noalert\"; flowbits:noalert; content:\"GET \"; sid:1;)"); FAIL_IF_NULL(s); - FAIL_IF((s->flags & SIG_FLAG_NOALERT) != SIG_FLAG_NOALERT); + FAIL_IF((s->action & ACTION_ALERT) != 0); SigGroupBuild(de_ctx); DetectEngineCtxFree(de_ctx); diff --git a/src/detect-flowbits.h b/src/detect-flowbits.h index 5ecd6cf87296..5e382de0a7a6 100644 --- a/src/detect-flowbits.h +++ b/src/detect-flowbits.h @@ -30,8 +30,7 @@ #define DETECT_FLOWBITS_CMD_UNSET 2 #define DETECT_FLOWBITS_CMD_ISNOTSET 3 #define 
DETECT_FLOWBITS_CMD_ISSET 4 -#define DETECT_FLOWBITS_CMD_NOALERT 5 -#define DETECT_FLOWBITS_CMD_MAX 6 +#define DETECT_FLOWBITS_CMD_MAX 5 typedef struct DetectFlowbitsData_ { uint32_t idx; diff --git a/src/detect-hostbits.c b/src/detect-hostbits.c index 764bf62805c1..97ebd22a68e8 100644 --- a/src/detect-hostbits.c +++ b/src/detect-hostbits.c @@ -25,6 +25,7 @@ #include "suricata-common.h" #include "decode.h" +#include "action-globals.h" #include "detect.h" #include "threads.h" #include "flow.h" @@ -378,7 +379,7 @@ int DetectHostbitSetup (DetectEngineCtx *de_ctx, Signature *s, const char *rawst case DETECT_XBITS_CMD_NOALERT: if (strlen(fb_name) != 0) goto error; - s->flags |= SIG_FLAG_NOALERT; + s->action &= ~ACTION_ALERT; return 0; case DETECT_XBITS_CMD_ISNOTSET: case DETECT_XBITS_CMD_ISSET: diff --git a/src/detect-http-header.c b/src/detect-http-header.c index e5101f9276b0..cd36ea597cd5 100644 --- a/src/detect-http-header.c +++ b/src/detect-http-header.c @@ -48,6 +48,7 @@ #include "util-print.h" #include "util-memcmp.h" #include "util-profiling.h" +#include "util-validate.h" #include "app-layer.h" #include "app-layer-parser.h" @@ -467,6 +468,8 @@ void DetectHttpHeaderRegister(void) static int g_http_request_header_buffer_id = 0; static int g_http_response_header_buffer_id = 0; +static int g_request_header_thread_id = 0; +static int g_response_header_thread_id = 0; static InspectionBuffer *GetHttp2HeaderData(DetectEngineThreadCtx *det_ctx, const uint8_t flags, const DetectEngineTransforms *transforms, Flow *_f, const struct MpmListIdDataArgs *cbdata, @@ -580,6 +583,43 @@ static int PrefilterMpmHttp2HeaderRegister(DetectEngineCtx *de_ctx, SigGroupHead mpm_reg->app_v2.tx_min_progress, pectx, PrefilterMpmHttpHeaderFree, mpm_reg->name); } +typedef struct HttpMultiBufItem { + uint8_t *buffer; + size_t len; +} HttpMultiBufItem; + +typedef struct HttpMultiBufHeaderThreadData { + // array of items, being defined as a buffer with its length just above + HttpMultiBufItem 
*items; + // capacity of items (size of allocation) + size_t cap; + // length of items (number in use) + size_t len; +} HttpMultiBufHeaderThreadData; + +static void *HttpMultiBufHeaderThreadDataInit(void *data) +{ + HttpMultiBufHeaderThreadData *td = SCCalloc(1, sizeof(*td)); + + /* This return value check to satisfy our Cocci malloc checks. */ + if (td == NULL) { + SCLogError("failed to allocate %" PRIuMAX " bytes: %s", (uintmax_t)sizeof(*td), + strerror(errno)); + return NULL; + } + return td; +} + +static void HttpMultiBufHeaderThreadDataFree(void *data) +{ + HttpMultiBufHeaderThreadData *td = data; + for (size_t i = 0; i < td->cap; i++) { + SCFree(td->items[i].buffer); + } + SCFree(td->items); + SCFree(td); +} + static InspectionBuffer *GetHttp1HeaderData(DetectEngineThreadCtx *det_ctx, const uint8_t flags, const DetectEngineTransforms *transforms, Flow *f, const struct MpmListIdDataArgs *cbdata, int list_id) @@ -593,10 +633,15 @@ static InspectionBuffer *GetHttp1HeaderData(DetectEngineThreadCtx *det_ctx, cons if (buffer->initialized) return buffer; - HttpHeaderThreadData *hdr_td = NULL; - HttpHeaderBuffer *buf = - HttpHeaderGetBufferSpace(det_ctx, f, flags, g_keyword_thread_id, &hdr_td); - if (unlikely(buf == NULL)) { + int kw_thread_id; + if (flags & STREAM_TOSERVER) { + kw_thread_id = g_request_header_thread_id; + } else { + kw_thread_id = g_response_header_thread_id; + } + HttpMultiBufHeaderThreadData *hdr_td = + DetectThreadCtxGetGlobalKeywordThreadCtx(det_ctx, kw_thread_id); + if (unlikely(hdr_td == NULL)) { return NULL; } @@ -607,33 +652,54 @@ static InspectionBuffer *GetHttp1HeaderData(DetectEngineThreadCtx *det_ctx, cons } else { headers = tx->response_headers; } - if (cbdata->local_id < htp_table_size(headers)) { - htp_header_t *h = htp_table_get_index(headers, cbdata->local_id, NULL); - size_t size1 = bstr_size(h->name); - size_t size2 = bstr_size(h->value); - size_t b_len = size1 + 2 + size2; - if (b_len > buf->size) { - if 
(HttpHeaderExpandBuffer(hdr_td, buf, b_len) != 0) { + size_t no_of_headers = htp_table_size(headers); + if (cbdata->local_id == 0) { + // We initialize a big buffer on first item + // Then, we will just use parts of it + hdr_td->len = 0; + if (hdr_td->cap < no_of_headers) { + void *new_buffer = SCRealloc(hdr_td->items, no_of_headers * sizeof(HttpMultiBufItem)); + if (unlikely(new_buffer == NULL)) { return NULL; } + hdr_td->items = new_buffer; + // zeroes the new part of the items + memset(hdr_td->items + hdr_td->cap, 0, + (no_of_headers - hdr_td->cap) * sizeof(HttpMultiBufItem)); + hdr_td->cap = no_of_headers; } - memcpy(buf->buffer, bstr_ptr(h->name), bstr_size(h->name)); - buf->buffer[size1] = ':'; - buf->buffer[size1 + 1] = ' '; - memcpy(buf->buffer + size1 + 2, bstr_ptr(h->value), bstr_size(h->value)); - buf->len = b_len; - } else { - InspectionBufferSetupMultiEmpty(buffer); - return NULL; - } - if (buf->len == 0) { - InspectionBufferSetupMultiEmpty(buffer); - return NULL; + for (size_t i = 0; i < no_of_headers; i++) { + htp_header_t *h = htp_table_get_index(headers, i, NULL); + size_t size1 = bstr_size(h->name); + size_t size2 = bstr_size(h->value); + size_t size = size1 + size2 + 2; + if (hdr_td->items[i].len < size) { + // Use realloc, as this pointer is not freed until HttpMultiBufHeaderThreadDataFree + void *tmp = SCRealloc(hdr_td->items[i].buffer, size); + if (unlikely(tmp == NULL)) { + return NULL; + } + hdr_td->items[i].buffer = tmp; + } + memcpy(hdr_td->items[i].buffer, bstr_ptr(h->name), size1); + hdr_td->items[i].buffer[size1] = ':'; + hdr_td->items[i].buffer[size1 + 1] = ' '; + memcpy(hdr_td->items[i].buffer + size1 + 2, bstr_ptr(h->value), size2); + hdr_td->items[i].len = size; + } + hdr_td->len = no_of_headers; } - InspectionBufferSetupMulti(buffer, transforms, buf->buffer, buf->len); - - SCReturnPtr(buffer, "InspectionBuffer"); + // cbdata->local_id is the index of the requested header buffer + // hdr_td->len is the number of header buffers + if 
(cbdata->local_id < hdr_td->len) { + // we have one valid header buffer + InspectionBufferSetupMulti(buffer, transforms, hdr_td->items[cbdata->local_id].buffer, + hdr_td->items[cbdata->local_id].len); + SCReturnPtr(buffer, "InspectionBuffer"); + } // else there are no more header buffer to get + InspectionBufferSetupMultiEmpty(buffer); + return NULL; } static void PrefilterTxHttp1Header(DetectEngineThreadCtx *det_ctx, const void *pectx, Packet *p, @@ -751,6 +817,8 @@ void DetectHttpRequestHeaderRegister(void) DetectBufferTypeSetDescriptionByName("http_request_header", "HTTP header name and value"); g_http_request_header_buffer_id = DetectBufferTypeGetByName("http_request_header"); DetectBufferTypeSupportsMultiInstance("http_request_header"); + g_request_header_thread_id = DetectRegisterThreadCtxGlobalFuncs("http_request_header", + HttpMultiBufHeaderThreadDataInit, NULL, HttpMultiBufHeaderThreadDataFree); } static int DetectHTTPResponseHeaderSetup(DetectEngineCtx *de_ctx, Signature *s, const char *arg) @@ -786,6 +854,8 @@ void DetectHttpResponseHeaderRegister(void) DetectBufferTypeSetDescriptionByName("http_response_header", "HTTP header name and value"); g_http_response_header_buffer_id = DetectBufferTypeGetByName("http_response_header"); DetectBufferTypeSupportsMultiInstance("http_response_header"); + g_response_header_thread_id = DetectRegisterThreadCtxGlobalFuncs("http_response_header", + HttpMultiBufHeaderThreadDataInit, NULL, HttpMultiBufHeaderThreadDataFree); } /************************************Unittests*********************************/ diff --git a/src/detect-http-server-body.c b/src/detect-http-server-body.c index 98f0ec581e94..28833a8a75bf 100644 --- a/src/detect-http-server-body.c +++ b/src/detect-http-server-body.c @@ -124,6 +124,9 @@ static int DetectHttpServerBodySetupSticky(DetectEngineCtx *de_ctx, Signature *s return -1; if (DetectSignatureSetAppProto(s, ALPROTO_HTTP) < 0) return -1; + // file data is on both directions, but we only take the one 
to client here + s->flags |= SIG_FLAG_TOCLIENT; + s->flags &= ~SIG_FLAG_TOSERVER; return 0; } diff --git a/src/detect-icode.c b/src/detect-icode.c index 3a601c286df7..e9616873ca9d 100644 --- a/src/detect-icode.c +++ b/src/detect-icode.c @@ -121,7 +121,8 @@ static int DetectICodeSetup(DetectEngineCtx *de_ctx, Signature *s, const char *i SigMatch *sm = NULL; icd = DetectU8Parse(icodestr); - if (icd == NULL) goto error; + if (icd == NULL) + return -1; sm = SigMatchAlloc(); if (sm == NULL) goto error; @@ -314,7 +315,6 @@ static int DetectICodeParseTest08(void) DetectU8Data *icd = DetectU8Parse("> 8 <> 20"); FAIL_IF_NOT_NULL(icd); - DetectICodeFree(NULL, icd); PASS; } @@ -327,7 +327,6 @@ static int DetectICodeParseTest09(void) DetectU8Data *icd = DetectU8Parse("8<<20"); FAIL_IF_NOT_NULL(icd); - DetectICodeFree(NULL, icd); PASS; } diff --git a/src/detect-ipopts.c b/src/detect-ipopts.c index 105751c388a4..01b4712691f9 100644 --- a/src/detect-ipopts.c +++ b/src/detect-ipopts.c @@ -25,23 +25,13 @@ #include "suricata-common.h" #include "suricata.h" -#include "decode.h" #include "detect.h" #include "detect-parse.h" -#include "flow-var.h" -#include "decode-events.h" - -#include "util-debug.h" - #include "detect-ipopts.h" #include "util-unittest.h" -#define PARSE_REGEX "\\S[A-z]" - -static DetectParseRegex parse_regex; - static int DetectIpOptsMatch (DetectEngineThreadCtx *, Packet *, const Signature *, const SigMatchCtx *); static int DetectIpOptsSetup (DetectEngineCtx *, Signature *, const char *); @@ -64,7 +54,6 @@ void DetectIpOptsRegister (void) #ifdef UNITTESTS sigmatch_table[DETECT_IPOPTS].RegisterTests = IpOptsRegisterTests; #endif - DetectSetupParseRegexes(PARSE_REGEX, &parse_regex); } /** @@ -173,11 +162,7 @@ static int DetectIpOptsMatch (DetectEngineThreadCtx *det_ctx, Packet *p, if (!de || !PKT_IS_IPV4(p) || PKT_IS_PSEUDOPKT(p)) return 0; - if (p->ip4vars.opts_set & de->ipopt) { - return 1; - } - - return 0; + return (p->ip4vars.opts_set & de->ipopt) == de->ipopt; } 
/** @@ -191,42 +176,30 @@ static int DetectIpOptsMatch (DetectEngineThreadCtx *det_ctx, Packet *p, */ static DetectIpOptsData *DetectIpOptsParse (const char *rawstr) { - int i; - DetectIpOptsData *de = NULL; - int found = 0; - - pcre2_match_data *match = NULL; - int ret = DetectParsePcreExec(&parse_regex, &match, rawstr, 0, 0); - if (ret < 1) { - SCLogError("pcre_exec parse error, ret %" PRId32 ", string %s", ret, rawstr); - goto error; - } + if (rawstr == NULL || strlen(rawstr) == 0) + return NULL; + int i; + bool found = false; for(i = 0; ipopts[i].ipopt_name != NULL; i++) { if((strcasecmp(ipopts[i].ipopt_name,rawstr)) == 0) { - found = 1; + found = true; break; } } - if(found == 0) - goto error; + if (!found) { + SCLogError("unknown IP option specified \"%s\"", rawstr); + return NULL; + } - de = SCMalloc(sizeof(DetectIpOptsData)); + DetectIpOptsData *de = SCMalloc(sizeof(DetectIpOptsData)); if (unlikely(de == NULL)) - goto error; + return NULL; de->ipopt = ipopts[i].code; - pcre2_match_data_free(match); return de; - -error: - if (match) { - pcre2_match_data_free(match); - } - if (de) SCFree(de); - return NULL; } /** @@ -242,10 +215,8 @@ static DetectIpOptsData *DetectIpOptsParse (const char *rawstr) */ static int DetectIpOptsSetup (DetectEngineCtx *de_ctx, Signature *s, const char *rawstr) { - DetectIpOptsData *de = NULL; SigMatch *sm = NULL; - - de = DetectIpOptsParse(rawstr); + DetectIpOptsData *de = DetectIpOptsParse(rawstr); if (de == NULL) goto error; @@ -275,8 +246,9 @@ static int DetectIpOptsSetup (DetectEngineCtx *de_ctx, Signature *s, const char */ void DetectIpOptsFree(DetectEngineCtx *de_ctx, void *de_ptr) { - DetectIpOptsData *de = (DetectIpOptsData *)de_ptr; - if(de) SCFree(de); + if (de_ptr) { + SCFree(de_ptr); + } } /* @@ -380,6 +352,20 @@ static int IpOptsTestParse04 (void) PASS; } +/** + * \test IpOptsTestParse05 tests the NULL and empty string + */ +static int IpOptsTestParse05(void) +{ + DetectIpOptsData *de = DetectIpOptsParse(""); + 
FAIL_IF_NOT_NULL(de); + + de = DetectIpOptsParse(NULL); + FAIL_IF_NOT_NULL(de); + + PASS; +} + /** * \brief this function registers unit tests for IpOpts */ @@ -389,5 +375,6 @@ void IpOptsRegisterTests(void) UtRegisterTest("IpOptsTestParse02", IpOptsTestParse02); UtRegisterTest("IpOptsTestParse03", IpOptsTestParse03); UtRegisterTest("IpOptsTestParse04", IpOptsTestParse04); + UtRegisterTest("IpOptsTestParse05", IpOptsTestParse05); } #endif /* UNITTESTS */ diff --git a/src/detect-iprep.c b/src/detect-iprep.c index cc3d9a04c9bc..3b71f1ca75c6 100644 --- a/src/detect-iprep.c +++ b/src/detect-iprep.c @@ -75,65 +75,66 @@ void DetectIPRepRegister (void) sigmatch_table[DETECT_IPREP].flags |= SIGMATCH_IPONLY_COMPAT; } -static inline uint8_t GetRep(const SReputation *r, const uint8_t cat, const uint32_t version) +static inline int8_t GetRep(const SReputation *r, const uint8_t cat, const uint32_t version) { /* allow higher versions as this happens during * rule reload */ if (r != NULL && r->version >= version) { return r->rep[cat]; } - return 0; + return -1; } -static uint8_t GetHostRepSrc(Packet *p, uint8_t cat, uint32_t version) +/** \returns: -2 no host, -1 no rep entry, 0-127 rep values */ +static int8_t GetHostRepSrc(Packet *p, uint8_t cat, uint32_t version) { if (p->flags & PKT_HOST_SRC_LOOKED_UP && p->host_src == NULL) { - return 0; + return -2; } else if (p->host_src != NULL) { Host *h = (Host *)p->host_src; HostLock(h); /* use_cnt: 1 for having iprep, 1 for packet ref */ DEBUG_VALIDATE_BUG_ON(h->iprep != NULL && SC_ATOMIC_GET(h->use_cnt) < 2); - uint8_t val = GetRep(h->iprep, cat, version); + int8_t val = GetRep(h->iprep, cat, version); HostUnlock(h); return val; } else { Host *h = HostLookupHostFromHash(&(p->src)); p->flags |= PKT_HOST_SRC_LOOKED_UP; if (h == NULL) - return 0; + return -2; HostReference(&p->host_src, h); /* use_cnt: 1 for having iprep, 1 for HostLookupHostFromHash, * 1 for HostReference to packet */ DEBUG_VALIDATE_BUG_ON(h->iprep != NULL && 
SC_ATOMIC_GET(h->use_cnt) < 3); - uint8_t val = GetRep(h->iprep, cat, version); + int8_t val = GetRep(h->iprep, cat, version); HostRelease(h); /* use_cnt >= 2: 1 for iprep, 1 for packet ref */ return val; } } -static uint8_t GetHostRepDst(Packet *p, uint8_t cat, uint32_t version) +static int8_t GetHostRepDst(Packet *p, uint8_t cat, uint32_t version) { if (p->flags & PKT_HOST_DST_LOOKED_UP && p->host_dst == NULL) { - return 0; + return -2; } else if (p->host_dst != NULL) { Host *h = (Host *)p->host_dst; HostLock(h); /* use_cnt: 1 for having iprep, 1 for packet ref */ DEBUG_VALIDATE_BUG_ON(h->iprep != NULL && SC_ATOMIC_GET(h->use_cnt) < 2); - uint8_t val = GetRep(h->iprep, cat, version); + int8_t val = GetRep(h->iprep, cat, version); HostUnlock(h); return val; } else { Host *h = HostLookupHostFromHash(&(p->dst)); p->flags |= PKT_HOST_DST_LOOKED_UP; if (h == NULL) - return 0; + return -2; HostReference(&p->host_dst, h); /* use_cnt: 1 for having iprep, 1 for HostLookupHostFromHash, * 1 for HostReference to packet */ DEBUG_VALIDATE_BUG_ON(h->iprep != NULL && SC_ATOMIC_GET(h->use_cnt) < 3); - uint8_t val = GetRep(h->iprep, cat, version); + int8_t val = GetRep(h->iprep, cat, version); HostRelease(h); /* use_cnt >= 2: 1 for iprep, 1 for packet ref */ return val; } @@ -152,58 +153,101 @@ static int DetectIPRepMatch (DetectEngineThreadCtx *det_ctx, Packet *p, return 0; uint32_t version = det_ctx->de_ctx->srep_version; - uint8_t val = 0; + int8_t val = 0; SCLogDebug("rd->cmd %u", rd->cmd); - switch(rd->cmd) { + switch (rd->cmd) { case IPRepCmdAny: - val = GetHostRepSrc(p, rd->cat, version); - if (val == 0) - val = SRepCIDRGetIPRepSrc(det_ctx->de_ctx->srepCIDR_ctx, p, rd->cat, version); - if (val > 0) { - if (DetectU8Match(val, &rd->du8)) + if (!rd->isnotset) { + val = GetHostRepSrc(p, rd->cat, version); + if (val < 0) + val = SRepCIDRGetIPRepSrc(det_ctx->de_ctx->srepCIDR_ctx, p, rd->cat, version); + if (val >= 0) { + if (DetectU8Match((uint8_t)val, &rd->du8)) + return 1; + } 
+ val = GetHostRepDst(p, rd->cat, version); + if (val < 0) + val = SRepCIDRGetIPRepDst(det_ctx->de_ctx->srepCIDR_ctx, p, rd->cat, version); + if (val >= 0) { + return DetectU8Match((uint8_t)val, &rd->du8); + } + } else { + /* isnotset for any */ + + val = GetHostRepSrc(p, rd->cat, version); + if (val < 0) + val = SRepCIDRGetIPRepSrc(det_ctx->de_ctx->srepCIDR_ctx, p, rd->cat, version); + if (val < 0) { return 1; - } - val = GetHostRepDst(p, rd->cat, version); - if (val == 0) - val = SRepCIDRGetIPRepDst(det_ctx->de_ctx->srepCIDR_ctx, p, rd->cat, version); - if (val > 0) { - return DetectU8Match(val, &rd->du8); + } + val = GetHostRepDst(p, rd->cat, version); + if (val < 0) + val = SRepCIDRGetIPRepDst(det_ctx->de_ctx->srepCIDR_ctx, p, rd->cat, version); + if (val < 0) { + return 1; + } + /* both have a value, so none 'isnotset' */ + return 0; } break; case IPRepCmdSrc: val = GetHostRepSrc(p, rd->cat, version); - SCLogDebug("checking src -- val %u (looking for cat %u, val %u)", val, rd->cat, + SCLogDebug("checking src -- val %d (looking for cat %u, val %u)", val, rd->cat, rd->du8.arg1); - if (val == 0) + if (val < 0) val = SRepCIDRGetIPRepSrc(det_ctx->de_ctx->srepCIDR_ctx, p, rd->cat, version); - if (val > 0) { - return DetectU8Match(val, &rd->du8); + if (val >= 0) { + return DetectU8Match((uint8_t)val, &rd->du8); + } + /* implied: no value found */ + if (rd->isnotset) { + return 1; } break; case IPRepCmdDst: SCLogDebug("checking dst"); val = GetHostRepDst(p, rd->cat, version); - if (val == 0) + if (val < 0) val = SRepCIDRGetIPRepDst(det_ctx->de_ctx->srepCIDR_ctx, p, rd->cat, version); - if (val > 0) { - return DetectU8Match(val, &rd->du8); + if (val >= 0) { + return DetectU8Match((uint8_t)val, &rd->du8); + } + /* implied: no value found */ + if (rd->isnotset) { + return 1; } break; case IPRepCmdBoth: - val = GetHostRepSrc(p, rd->cat, version); - if (val == 0) + if (!rd->isnotset) { + val = GetHostRepSrc(p, rd->cat, version); + if (val < 0) + val = 
SRepCIDRGetIPRepSrc(det_ctx->de_ctx->srepCIDR_ctx, p, rd->cat, version); + if (val < 0 || DetectU8Match((uint8_t)val, &rd->du8) == 0) + return 0; + val = GetHostRepDst(p, rd->cat, version); + if (val < 0) + val = SRepCIDRGetIPRepDst(det_ctx->de_ctx->srepCIDR_ctx, p, rd->cat, version); + if (val >= 0) { + return DetectU8Match((uint8_t)val, &rd->du8); + } + } else { + val = GetHostRepSrc(p, rd->cat, version); + if (val >= 0) + return 0; val = SRepCIDRGetIPRepSrc(det_ctx->de_ctx->srepCIDR_ctx, p, rd->cat, version); - if (val == 0 || DetectU8Match(val, &rd->du8) == 0) - return 0; - val = GetHostRepDst(p, rd->cat, version); - if (val == 0) + if (val >= 0) + return 0; + val = GetHostRepDst(p, rd->cat, version); + if (val >= 0) + return 0; val = SRepCIDRGetIPRepDst(det_ctx->de_ctx->srepCIDR_ctx, p, rd->cat, version); - if (val > 0) { - return DetectU8Match(val, &rd->du8); + if (val >= 0) + return 0; + return 1; } break; } @@ -247,7 +291,6 @@ int DetectIPRepSetup (DetectEngineCtx *de_ctx, Signature *s, const char *rawstr) void DetectIPRepFree (DetectEngineCtx *de_ctx, void *ptr) { DetectIPRepData *fd = (DetectIPRepData *)ptr; - if (fd == NULL) return; @@ -782,6 +825,118 @@ static int DetectIPRepTest09(void) PASS; } +static FILE *DetectIPRepGenerateNetworksDummy3(void) +{ + FILE *fd = NULL; + const char *buffer = "192.168.0.0/16,1,127"; // BadHosts + + fd = SCFmemopen((void *)buffer, strlen(buffer), "r"); + if (fd == NULL) + SCLogDebug("Error with SCFmemopen()"); + + return fd; +} + +static int DetectIPRepTest10(void) +{ + ThreadVars th_v; + DetectEngineThreadCtx *det_ctx = NULL; + Signature *sig = NULL; + FILE *fd = NULL; + int r = 0; + Packet *p = UTHBuildPacket((uint8_t *)"lalala", 6, IPPROTO_TCP); + DetectEngineCtx *de_ctx = DetectEngineCtxInit(); + + HostInitConfig(HOST_QUIET); + memset(&th_v, 0, sizeof(th_v)); + + FAIL_IF_NULL(de_ctx); + FAIL_IF_NULL(p); + + p->src.addr_data32[0] = UTHSetIPv4Address("192.168.0.1"); + p->dst.addr_data32[0] = 
UTHSetIPv4Address("192.168.0.2"); + de_ctx->flags |= DE_QUIET; + + SRepInit(de_ctx); + SRepResetVersion(); + + fd = DetectIPRepGenerateCategoriesDummy2(); + r = SRepLoadCatFileFromFD(fd); + FAIL_IF(r < 0); + + fd = DetectIPRepGenerateNetworksDummy3(); + r = SRepLoadFileFromFD(de_ctx->srepCIDR_ctx, fd); + FAIL_IF(r < 0); + + sig = DetectEngineAppendSig(de_ctx, + "alert tcp any any -> any any (msg:\"test\"; iprep:src,BadHosts,isset; sid:1; rev:1;)"); + FAIL_IF_NULL(sig); + + SigGroupBuild(de_ctx); + DetectEngineThreadCtxInit(&th_v, (void *)de_ctx, (void *)&det_ctx); + + SigMatchSignatures(&th_v, de_ctx, det_ctx, p); + FAIL_IF_NOT(p->alerts.cnt == 1); + + UTHFreePacket(p); + + DetectEngineThreadCtxDeinit(&th_v, (void *)det_ctx); + DetectEngineCtxFree(de_ctx); + + HostShutdown(); + PASS; +} + +static int DetectIPRepTest11(void) +{ + ThreadVars th_v; + DetectEngineThreadCtx *det_ctx = NULL; + Signature *sig = NULL; + FILE *fd = NULL; + int r = 0; + Packet *p = UTHBuildPacket((uint8_t *)"lalala", 6, IPPROTO_TCP); + DetectEngineCtx *de_ctx = DetectEngineCtxInit(); + + HostInitConfig(HOST_QUIET); + memset(&th_v, 0, sizeof(th_v)); + + FAIL_IF_NULL(de_ctx); + FAIL_IF_NULL(p); + + p->src.addr_data32[0] = UTHSetIPv4Address("10.0.0.1"); + p->dst.addr_data32[0] = UTHSetIPv4Address("10.0.0.2"); + de_ctx->flags |= DE_QUIET; + + SRepInit(de_ctx); + SRepResetVersion(); + + fd = DetectIPRepGenerateCategoriesDummy2(); + r = SRepLoadCatFileFromFD(fd); + FAIL_IF(r < 0); + + fd = DetectIPRepGenerateNetworksDummy3(); + r = SRepLoadFileFromFD(de_ctx->srepCIDR_ctx, fd); + FAIL_IF(r < 0); + + sig = DetectEngineAppendSig(de_ctx, "alert tcp any any -> any any (msg:\"test\"; " + "iprep:src,BadHosts,isnotset; sid:1; rev:1;)"); + FAIL_IF_NULL(sig); + + SigGroupBuild(de_ctx); + DetectEngineThreadCtxInit(&th_v, (void *)de_ctx, (void *)&det_ctx); + + SigMatchSignatures(&th_v, de_ctx, det_ctx, p); + FAIL_IF_NOT(p->alerts.cnt == 1); + + UTHFreePacket(p); + + DetectEngineThreadCtxDeinit(&th_v, (void 
*)det_ctx); + DetectEngineCtxFree(de_ctx); + + HostShutdown(); + PASS; +} + /** * \brief this function registers unit tests for IPRep */ @@ -796,5 +951,7 @@ void IPRepRegisterTests(void) UtRegisterTest("DetectIPRepTest07", DetectIPRepTest07); UtRegisterTest("DetectIPRepTest08", DetectIPRepTest08); UtRegisterTest("DetectIPRepTest09", DetectIPRepTest09); + UtRegisterTest("DetectIPRepTest10 -- isset", DetectIPRepTest10); + UtRegisterTest("DetectIPRepTest11 -- isnotset", DetectIPRepTest11); } #endif /* UNITTESTS */ diff --git a/src/detect-itype.c b/src/detect-itype.c index d8168600f5d2..a5abcc970042 100644 --- a/src/detect-itype.c +++ b/src/detect-itype.c @@ -101,20 +101,6 @@ static int DetectITypeMatch (DetectEngineThreadCtx *det_ctx, Packet *p, return DetectU8Match(pitype, itd); } -/** - * \brief This function is used to parse itype options passed via itype: keyword - * - * \param de_ctx Pointer to the detection engine context - * \param itypestr Pointer to the user provided itype options - * - * \retval itd pointer to DetectU8Data on success - * \retval NULL on failure - */ -static DetectU8Data *DetectITypeParse(DetectEngineCtx *de_ctx, const char *itypestr) -{ - return DetectU8Parse(itypestr); -} - /** * \brief this function is used to add the parsed itype data into the current signature * @@ -131,8 +117,9 @@ static int DetectITypeSetup(DetectEngineCtx *de_ctx, Signature *s, const char *i DetectU8Data *itd = NULL; SigMatch *sm = NULL; - itd = DetectITypeParse(de_ctx, itypestr); - if (itd == NULL) goto error; + itd = DetectU8Parse(itypestr); + if (itd == NULL) + return -1; sm = SigMatchAlloc(); if (sm == NULL) goto error; @@ -221,7 +208,7 @@ static bool PrefilterITypeIsPrefilterable(const Signature *s) static int DetectITypeParseTest01(void) { DetectU8Data *itd = NULL; - itd = DetectITypeParse(NULL, "8"); + itd = DetectU8Parse("8"); FAIL_IF_NULL(itd); FAIL_IF_NOT(itd->arg1 == 8); FAIL_IF_NOT(itd->mode == DETECT_UINT_EQ); @@ -237,7 +224,7 @@ static int 
DetectITypeParseTest01(void) static int DetectITypeParseTest02(void) { DetectU8Data *itd = NULL; - itd = DetectITypeParse(NULL, ">8"); + itd = DetectU8Parse(">8"); FAIL_IF_NULL(itd); FAIL_IF_NOT(itd->arg1 == 8); FAIL_IF_NOT(itd->mode == DETECT_UINT_GT); @@ -253,7 +240,7 @@ static int DetectITypeParseTest02(void) static int DetectITypeParseTest03(void) { DetectU8Data *itd = NULL; - itd = DetectITypeParse(NULL, "<8"); + itd = DetectU8Parse("<8"); FAIL_IF_NULL(itd); FAIL_IF_NOT(itd->arg1 == 8); FAIL_IF_NOT(itd->mode == DETECT_UINT_LT); @@ -269,7 +256,7 @@ static int DetectITypeParseTest03(void) static int DetectITypeParseTest04(void) { DetectU8Data *itd = NULL; - itd = DetectITypeParse(NULL, "8<>20"); + itd = DetectU8Parse("8<>20"); FAIL_IF_NULL(itd); FAIL_IF_NOT(itd->arg1 == 8); FAIL_IF_NOT(itd->arg2 == 20); @@ -286,7 +273,7 @@ static int DetectITypeParseTest04(void) static int DetectITypeParseTest05(void) { DetectU8Data *itd = NULL; - itd = DetectITypeParse(NULL, " 8 "); + itd = DetectU8Parse(" 8 "); FAIL_IF_NULL(itd); FAIL_IF_NOT(itd->arg1 == 8); FAIL_IF_NOT(itd->mode == DETECT_UINT_EQ); @@ -302,7 +289,7 @@ static int DetectITypeParseTest05(void) static int DetectITypeParseTest06(void) { DetectU8Data *itd = NULL; - itd = DetectITypeParse(NULL, " > 8 "); + itd = DetectU8Parse(" > 8 "); FAIL_IF_NULL(itd); FAIL_IF_NOT(itd->arg1 == 8); FAIL_IF_NOT(itd->mode == DETECT_UINT_GT); @@ -318,7 +305,7 @@ static int DetectITypeParseTest06(void) static int DetectITypeParseTest07(void) { DetectU8Data *itd = NULL; - itd = DetectITypeParse(NULL, " 8 <> 20 "); + itd = DetectU8Parse(" 8 <> 20 "); FAIL_IF_NULL(itd); FAIL_IF_NOT(itd->arg1 == 8); FAIL_IF_NOT(itd->arg2 == 20); @@ -334,9 +321,8 @@ static int DetectITypeParseTest07(void) static int DetectITypeParseTest08(void) { DetectU8Data *itd = NULL; - itd = DetectITypeParse(NULL, "> 8 <> 20"); + itd = DetectU8Parse("> 8 <> 20"); FAIL_IF_NOT_NULL(itd); - DetectITypeFree(NULL, itd); PASS; } diff --git a/src/detect-ja4-hash.c 
b/src/detect-ja4-hash.c new file mode 100644 index 000000000000..e1d285264da0 --- /dev/null +++ b/src/detect-ja4-hash.c @@ -0,0 +1,180 @@ +/* Copyright (C) 2023 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +/** + * \file + * + * \author Sascha Steinbiss + * + * Implements support for ja4.hash keyword. + */ + +#include "suricata-common.h" +#include "threads.h" +#include "decode.h" +#include "detect.h" + +#include "detect-parse.h" +#include "detect-engine.h" +#include "detect-engine-mpm.h" +#include "detect-engine-prefilter.h" +#include "detect-ja4-hash.h" + +#include "util-ja4.h" + +#include "app-layer-ssl.h" + +#ifndef HAVE_JA4 +static int DetectJA4SetupNoSupport(DetectEngineCtx *a, Signature *b, const char *c) +{ + SCLogError("no JA4 support built in"); + return -1; +} +#endif /* HAVE_JA4 */ + +static int DetectJa4HashSetup(DetectEngineCtx *, Signature *, const char *); +static InspectionBuffer *GetData(DetectEngineThreadCtx *det_ctx, + const DetectEngineTransforms *transforms, Flow *f, const uint8_t flow_flags, void *txv, + const int list_id); +int Ja4IsDisabled(const char *type); +static InspectionBuffer *Ja4DetectGetHash(DetectEngineThreadCtx *det_ctx, + const DetectEngineTransforms *transforms, Flow *_f, const uint8_t _flow_flags, void *txv, + const int list_id); + +static int g_ja4_hash_buffer_id = 0; + +/** + * 
\brief Registration function for keyword: ja4.hash + */ +void DetectJa4HashRegister(void) +{ + sigmatch_table[DETECT_AL_JA4_HASH].name = "ja4.hash"; + sigmatch_table[DETECT_AL_JA4_HASH].alias = "ja4_hash"; + sigmatch_table[DETECT_AL_JA4_HASH].desc = "sticky buffer to match the JA4 hash buffer"; + sigmatch_table[DETECT_AL_JA4_HASH].url = "/rules/ja4-keywords.html#ja4-hash"; +#ifdef HAVE_JA4 + sigmatch_table[DETECT_AL_JA4_HASH].Setup = DetectJa4HashSetup; +#else /* HAVE_JA4 */ + sigmatch_table[DETECT_AL_JA4_HASH].Setup = DetectJA4SetupNoSupport; +#endif /* HAVE_JA4 */ + sigmatch_table[DETECT_AL_JA4_HASH].flags |= SIGMATCH_NOOPT; + sigmatch_table[DETECT_AL_JA4_HASH].flags |= SIGMATCH_INFO_STICKY_BUFFER; + +#ifdef HAVE_JA4 + DetectAppLayerInspectEngineRegister2("ja4.hash", ALPROTO_TLS, SIG_FLAG_TOSERVER, 0, + DetectEngineInspectBufferGeneric, GetData); + + DetectAppLayerMpmRegister2( + "ja4.hash", SIG_FLAG_TOSERVER, 2, PrefilterGenericMpmRegister, GetData, ALPROTO_TLS, 0); + + DetectAppLayerMpmRegister2("ja4.hash", SIG_FLAG_TOSERVER, 2, PrefilterGenericMpmRegister, + Ja4DetectGetHash, ALPROTO_QUIC, 1); + + DetectAppLayerInspectEngineRegister2("ja4.hash", ALPROTO_QUIC, SIG_FLAG_TOSERVER, 1, + DetectEngineInspectBufferGeneric, Ja4DetectGetHash); + + DetectBufferTypeSetDescriptionByName("ja4.hash", "TLS JA4 hash"); + + g_ja4_hash_buffer_id = DetectBufferTypeGetByName("ja4.hash"); +#endif /* HAVE_JA4 */ +} + +/** + * \brief this function setup the ja4.hash modifier keyword used in the rule + * + * \param de_ctx Pointer to the Detection Engine Context + * \param s Pointer to the Signature to which the current keyword belongs + * \param str Should hold an empty string always + * + * \retval 0 On success + * \retval -1 On failure + */ +static int DetectJa4HashSetup(DetectEngineCtx *de_ctx, Signature *s, const char *str) +{ + if (DetectBufferSetActiveList(de_ctx, s, g_ja4_hash_buffer_id) < 0) + return -1; + + if (s->alproto != ALPROTO_UNKNOWN && s->alproto != ALPROTO_TLS && 
s->alproto != ALPROTO_QUIC) { + SCLogError("rule contains conflicting protocols."); + return -1; + } + + /* try to enable JA4 */ + SSLEnableJA4(); + + /* check if JA4 enabling had an effect */ + if (!RunmodeIsUnittests() && !SSLJA4IsEnabled()) { + if (!SigMatchSilentErrorEnabled(de_ctx, DETECT_AL_JA4_HASH)) { + SCLogError("JA4 support is not enabled"); + } + return -2; + } else { + static bool once = false; + if (!once) { + once = true; + SCLogConfig("enabling JA4 due to rule usage"); + } + } + s->init_data->init_flags |= SIG_FLAG_INIT_JA; + s->flags |= SIG_FLAG_JA4; + + return 0; +} + +static InspectionBuffer *GetData(DetectEngineThreadCtx *det_ctx, + const DetectEngineTransforms *transforms, Flow *f, const uint8_t flow_flags, void *txv, + const int list_id) +{ + InspectionBuffer *buffer = InspectionBufferGet(det_ctx, list_id); + if (buffer->inspect == NULL) { + const SSLState *ssl_state = (SSLState *)f->alstate; + + if (ssl_state->client_connp.ja4 == NULL) { + return NULL; + } + + uint8_t data[JA4_HEX_LEN]; + SCJA4GetHash(ssl_state->client_connp.ja4, (uint8_t(*)[JA4_HEX_LEN])data); + + InspectionBufferSetup(det_ctx, list_id, buffer, data, 0); + InspectionBufferCopy(buffer, data, JA4_HEX_LEN); + InspectionBufferApplyTransforms(buffer, transforms); + } + + return buffer; +} + +static InspectionBuffer *Ja4DetectGetHash(DetectEngineThreadCtx *det_ctx, + const DetectEngineTransforms *transforms, Flow *_f, const uint8_t _flow_flags, void *txv, + const int list_id) +{ + InspectionBuffer *buffer = InspectionBufferGet(det_ctx, list_id); + if (buffer->inspect == NULL) { + uint32_t b_len = 0; + const uint8_t *b = NULL; + + if (rs_quic_tx_get_ja4(txv, &b, &b_len) != 1) + return NULL; + if (b == NULL || b_len == 0) + return NULL; + + InspectionBufferSetup(det_ctx, list_id, buffer, NULL, 0); + InspectionBufferCopy(buffer, (uint8_t *)b, JA4_HEX_LEN); + InspectionBufferApplyTransforms(buffer, transforms); + } + return buffer; +} diff --git a/src/detect-ja4-hash.h 
b/src/detect-ja4-hash.h new file mode 100644 index 000000000000..f3a5782e36fd --- /dev/null +++ b/src/detect-ja4-hash.h @@ -0,0 +1,30 @@ +/* Copyright (C) 2023 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +/** + * \file + * + * \author Sascha Steinbiss + */ + +#ifndef __DETECT_JA4_HASH_H__ +#define __DETECT_JA4_HASH_H__ + +/* Prototypes */ +void DetectJa4HashRegister(void); + +#endif /* __DETECT_JA4_HASH_H__ */ diff --git a/src/detect-noalert.c b/src/detect-noalert.c index dda060a4f069..e26fd343a44a 100644 --- a/src/detect-noalert.c +++ b/src/detect-noalert.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2010 Open Information Security Foundation +/* Copyright (C) 2007-2024 Open Information Security Foundation * * You can copy, redistribute or modify this Program under the terms of * the GNU General Public License version 2 as published by the Free @@ -20,33 +20,43 @@ * * \author Victor Julien * - * Implements the noalert keyword + * Implements the noalert and alert keywords. 
*/ #include "suricata-common.h" +#include "action-globals.h" #include "detect.h" #include "detect-noalert.h" #include "util-debug.h" +#include "util-validate.h" -static int DetectNoalertSetup (DetectEngineCtx *, Signature *, const char *); - -void DetectNoalertRegister (void) +static int DetectNoalertSetup(DetectEngineCtx *de_ctx, Signature *s, const char *nullstr) { - sigmatch_table[DETECT_NOALERT].name = "noalert"; - sigmatch_table[DETECT_NOALERT].desc = "no alert will be generated by the rule"; - sigmatch_table[DETECT_NOALERT].url = "/rules/flow-keywords.html"; - sigmatch_table[DETECT_NOALERT].Setup = DetectNoalertSetup; - sigmatch_table[DETECT_NOALERT].flags |= SIGMATCH_NOOPT; + DEBUG_VALIDATE_BUG_ON(nullstr != NULL); + + s->action &= ~ACTION_ALERT; + return 0; } -static int DetectNoalertSetup (DetectEngineCtx *de_ctx, Signature *s, const char *nullstr) +static int DetectAlertSetup(DetectEngineCtx *de_ctx, Signature *s, const char *nullstr) { - if (nullstr != NULL) { - SCLogError("nocase has no value"); - return -1; - } + DEBUG_VALIDATE_BUG_ON(nullstr != NULL); - s->flags |= SIG_FLAG_NOALERT; + s->action |= ACTION_ALERT; return 0; } +void DetectNoalertRegister(void) +{ + sigmatch_table[DETECT_NOALERT].name = "noalert"; + sigmatch_table[DETECT_NOALERT].desc = "no alert will be generated by the rule"; + sigmatch_table[DETECT_NOALERT].url = "/rules/noalert.html"; + sigmatch_table[DETECT_NOALERT].Setup = DetectNoalertSetup; + sigmatch_table[DETECT_NOALERT].flags |= SIGMATCH_NOOPT; + + sigmatch_table[DETECT_ALERT].name = "alert"; + sigmatch_table[DETECT_ALERT].desc = "alert will be generated by the rule"; + sigmatch_table[DETECT_ALERT].url = "/rules/noalert.html"; + sigmatch_table[DETECT_ALERT].Setup = DetectAlertSetup; + sigmatch_table[DETECT_ALERT].flags |= SIGMATCH_NOOPT; +} diff --git a/src/detect-parse.c b/src/detect-parse.c index d9800f0a2f34..5a2f44c97cec 100644 --- a/src/detect-parse.c +++ b/src/detect-parse.c @@ -840,7 +840,8 @@ int 
SigMatchListSMBelongsTo(const Signature *s, const SigMatch *key_sm) return -1; } -static int SigParseOptions(DetectEngineCtx *de_ctx, Signature *s, char *optstr, char *output, size_t output_size) +static int SigParseOptions(DetectEngineCtx *de_ctx, Signature *s, char *optstr, char *output, + size_t output_size, bool requires) { SigTableElmt *st = NULL; char *optname = NULL; @@ -894,6 +895,13 @@ static int SigParseOptions(DetectEngineCtx *de_ctx, Signature *s, char *optstr, } optname = optstr; + /* Check for options that are only to be processed during the + * first "requires" pass. */ + bool requires_only = strcasecmp(optname, "requires") == 0 || strcasecmp(optname, "sid") == 0; + if ((requires && !requires_only) || (!requires && requires_only)) { + goto finish; + } + /* Call option parsing */ st = SigTableGet(optname); if (st == NULL || st->Setup == NULL) { @@ -1036,6 +1044,7 @@ static int SigParseOptions(DetectEngineCtx *de_ctx, Signature *s, char *optstr, } s->init_data->negated = false; +finish: if (strlen(optend) > 0) { strlcpy(output, optend, output_size); return 1; @@ -1206,7 +1215,7 @@ static int SigParseAction(Signature *s, const char *action) if (strcasecmp(action, "alert") == 0) { s->action = ACTION_ALERT; } else if (strcasecmp(action, "drop") == 0) { - s->action = ACTION_DROP; + s->action = ACTION_DROP | ACTION_ALERT; } else if (strcasecmp(action, "pass") == 0) { s->action = ACTION_PASS; } else if (strcasecmp(action, "reject") == 0 || @@ -1214,18 +1223,17 @@ static int SigParseAction(Signature *s, const char *action) { if (!(SigParseActionRejectValidate(action))) return -1; - s->action = ACTION_REJECT|ACTION_DROP; + s->action = ACTION_REJECT | ACTION_DROP | ACTION_ALERT; } else if (strcasecmp(action, "rejectdst") == 0) { if (!(SigParseActionRejectValidate(action))) return -1; - s->action = ACTION_REJECT_DST|ACTION_DROP; + s->action = ACTION_REJECT_DST | ACTION_DROP | ACTION_ALERT; } else if (strcasecmp(action, "rejectboth") == 0) { if 
(!(SigParseActionRejectValidate(action))) return -1; - s->action = ACTION_REJECT_BOTH|ACTION_DROP; + s->action = ACTION_REJECT_BOTH | ACTION_DROP | ACTION_ALERT; } else if (strcasecmp(action, "config") == 0) { s->action = ACTION_CONFIG; - s->flags |= SIG_FLAG_NOALERT; } else { SCLogError("An invalid action \"%s\" was given", action); return -1; @@ -1321,9 +1329,11 @@ static inline int SigParseList(char **input, char *output, /** * \internal * \brief split a signature string into a few blocks for further parsing + * + * \param scan_only just scan, don't validate */ -static int SigParseBasics(DetectEngineCtx *de_ctx, - Signature *s, const char *sigstr, SignatureParser *parser, uint8_t addrs_direction) +static int SigParseBasics(DetectEngineCtx *de_ctx, Signature *s, const char *sigstr, + SignatureParser *parser, uint8_t addrs_direction, bool scan_only) { char *index, dup[DETECT_MAX_RULE_SIZE]; @@ -1368,6 +1378,10 @@ static int SigParseBasics(DetectEngineCtx *de_ctx, } strlcpy(parser->opts, index, sizeof(parser->opts)); + if (scan_only) { + return 0; + } + /* Parse Action */ if (SigParseAction(s, parser->action) < 0) goto error; @@ -1429,12 +1443,13 @@ static inline bool CheckAscii(const char *str) * \param s memory structure to store the signature in * \param sigstr the raw signature as a null terminated string * \param addrs_direction direction (for bi-directional sigs) + * \param require only scan rule for requires * * \param -1 parse error * \param 0 ok */ -static int SigParse(DetectEngineCtx *de_ctx, Signature *s, - const char *sigstr, uint8_t addrs_direction, SignatureParser *parser) +static int SigParse(DetectEngineCtx *de_ctx, Signature *s, const char *sigstr, + uint8_t addrs_direction, SignatureParser *parser, bool requires) { SCEnter(); @@ -1448,12 +1463,7 @@ static int SigParse(DetectEngineCtx *de_ctx, Signature *s, SCReturnInt(-1); } - s->sig_str = SCStrdup(sigstr); - if (unlikely(s->sig_str == NULL)) { - SCReturnInt(-1); - } - - int ret = 
SigParseBasics(de_ctx, s, sigstr, parser, addrs_direction); + int ret = SigParseBasics(de_ctx, s, sigstr, parser, addrs_direction, requires); if (ret < 0) { SCLogDebug("SigParseBasics failed"); SCReturnInt(-1); @@ -1465,21 +1475,27 @@ static int SigParse(DetectEngineCtx *de_ctx, Signature *s, char input[buffer_size]; char output[buffer_size]; memset(input, 0x00, buffer_size); - memcpy(input, parser->opts, strlen(parser->opts)+1); + memcpy(input, parser->opts, strlen(parser->opts) + 1); /* loop the option parsing. Each run processes one option * and returns the rest of the option string through the * output variable. */ do { memset(output, 0x00, buffer_size); - ret = SigParseOptions(de_ctx, s, input, output, buffer_size); + ret = SigParseOptions(de_ctx, s, input, output, buffer_size, requires); if (ret == 1) { memcpy(input, output, buffer_size); } } while (ret == 1); + + if (ret < 0) { + /* Suricata didn't meet the rule requirements, skip. */ + goto end; + } } +end: DetectIPProtoRemoveAllSMs(de_ctx, s); SCReturnInt(ret); @@ -1523,6 +1539,7 @@ Signature *SigAlloc (void) sig->init_data->buffers = SCCalloc(8, sizeof(SignatureInitDataBuffer)); if (sig->init_data->buffers == NULL) { + SCFree(sig->init_data); SCFree(sig); return NULL; } @@ -2096,9 +2113,9 @@ static int SigValidate(DetectEngineCtx *de_ctx, Signature *s) DetectLuaPostSetup(s); #endif - if (s->init_data->init_flags & SIG_FLAG_INIT_JA3 && s->alproto != ALPROTO_UNKNOWN && + if ((s->init_data->init_flags & SIG_FLAG_INIT_JA) && s->alproto != ALPROTO_UNKNOWN && s->alproto != ALPROTO_TLS && s->alproto != ALPROTO_QUIC) { - SCLogError("Cannot have ja3 with protocol %s.", AppProtoToString(s->alproto)); + SCLogError("Cannot have ja3/ja4 with protocol %s.", AppProtoToString(s->alproto)); SCReturnInt(0); } if ((s->flags & SIG_FLAG_FILESTORE) || s->file_flags != 0 || @@ -2120,10 +2137,7 @@ static int SigValidate(DetectEngineCtx *de_ctx, Signature *s) AppLayerHtpNeedFileInspection(); } } - if (s->id == 0) { - 
SCLogError("Signature missing required value \"sid\"."); - SCReturnInt(0); - } + SCReturnInt(1); } @@ -2141,16 +2155,43 @@ static Signature *SigInitHelper(DetectEngineCtx *de_ctx, const char *sigstr, if (sig == NULL) goto error; + sig->sig_str = SCStrdup(sigstr); + if (unlikely(sig->sig_str == NULL)) { + goto error; + } + /* default gid to 1 */ sig->gid = 1; - int ret = SigParse(de_ctx, sig, sigstr, dir, &parser); - if (ret == -3) { + /* We do a first parse of the rule in a requires, or scan-only + * mode. Syntactic errors will be picked up here, but the only + * part of the rule that is validated completely is the "requires" + * keyword. */ + int ret = SigParse(de_ctx, sig, sigstr, dir, &parser, true); + if (ret == -4) { + /* Rule requirements not met. */ de_ctx->sigerror_silent = true; de_ctx->sigerror_ok = true; + de_ctx->sigerror_requires = true; + goto error; + } else if (ret < 0) { + goto error; + } + + /* Check for a SID before continuing. */ + if (sig->id == 0) { + SCLogError("Signature missing required value \"sid\"."); + goto error; + } - else if (ret == -2) { + + /* Now completely parse the rule. 
*/ + ret = SigParse(de_ctx, sig, sigstr, dir, &parser, false); + BUG_ON(ret == -4); + if (ret == -3) { + de_ctx->sigerror_silent = true; + de_ctx->sigerror_ok = true; + goto error; + } else if (ret == -2) { de_ctx->sigerror_silent = true; goto error; } else if (ret < 0) { @@ -2297,7 +2338,9 @@ Signature *SigInit(DetectEngineCtx *de_ctx, const char *sigstr) SCEnter(); uint32_t oldsignum = de_ctx->signum; + de_ctx->sigerror_ok = false; de_ctx->sigerror_silent = false; + de_ctx->sigerror_requires = false; Signature *sig; @@ -2657,7 +2700,7 @@ int DetectParsePcreExec(DetectParseRegex *parse_regex, pcre2_match_data **match, *match = pcre2_match_data_create_from_pattern(parse_regex->regex, NULL); if (*match) return pcre2_match(parse_regex->regex, (PCRE2_SPTR8)str, strlen(str), options, start_offset, - *match, NULL); + *match, parse_regex->context); return -1; } @@ -2717,8 +2760,16 @@ bool DetectSetupParseRegexesOpts(const char *parse_str, DetectParseRegex *detect parse_str, en, errbuffer); return false; } - detect_parse->match = pcre2_match_data_create_from_pattern(detect_parse->regex, NULL); + detect_parse->context = pcre2_match_context_create(NULL); + if (detect_parse->context == NULL) { + SCLogError("pcre2 could not create match context"); + pcre2_code_free(detect_parse->regex); + detect_parse->regex = NULL; + return false; + } + pcre2_set_match_limit(detect_parse->context, SC_MATCH_LIMIT_DEFAULT); + pcre2_set_recursion_limit(detect_parse->context, SC_MATCH_LIMIT_RECURSION_DEFAULT); DetectParseRegexAddToFreeList(detect_parse); return true; diff --git a/src/detect-requires.c b/src/detect-requires.c new file mode 100644 index 000000000000..4d7f916b3b82 --- /dev/null +++ b/src/detect-requires.c @@ -0,0 +1,50 @@ +/* Copyright (C) 2023 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#include "detect-requires.h" +#include "suricata-common.h" +#include "detect-engine.h" +#include "rust.h" + +static int DetectRequiresSetup(DetectEngineCtx *de_ctx, Signature *s, const char *rawstr) +{ + if (de_ctx->requirements == NULL) { + de_ctx->requirements = (void *)SCDetectRequiresStatusNew(); + BUG_ON(de_ctx->requirements == NULL); + } + + const char *errmsg = NULL; + int res = SCDetectCheckRequires(rawstr, PROG_VER, &errmsg, de_ctx->requirements); + if (res == -1) { + // The requires expression is bad, log an error. + SCLogError("%s: %s", errmsg, rawstr); + de_ctx->sigerror = errmsg; + } else if (res < -1) { + // This Suricata instance didn't meet the requirements. 
+ SCLogInfo("Suricata did not meet the rule requirements: %s: %s", errmsg, rawstr); + return -4; + } + return res; +} + +void DetectRequiresRegister(void) +{ + sigmatch_table[DETECT_REQUIRES].name = "requires"; + sigmatch_table[DETECT_REQUIRES].desc = "require Suricata version or features"; + sigmatch_table[DETECT_REQUIRES].url = "/rules/meta-keywords.html#requires"; + sigmatch_table[DETECT_REQUIRES].Setup = DetectRequiresSetup; +} diff --git a/src/detect-requires.h b/src/detect-requires.h new file mode 100644 index 000000000000..70f1dc43b814 --- /dev/null +++ b/src/detect-requires.h @@ -0,0 +1,23 @@ +/* Copyright (C) 2023 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#ifndef __DETECT_REQUIRES_H__ +#define __DETECT_REQUIRES_H__ + +void DetectRequiresRegister(void); + +#endif /* __DETECT_REQUIRES_H__ */ diff --git a/src/detect-rfb-sectype.c b/src/detect-rfb-sectype.c index 400ee5cb087c..476ea3fd3c06 100644 --- a/src/detect-rfb-sectype.c +++ b/src/detect-rfb-sectype.c @@ -90,20 +90,6 @@ static int DetectRfbSectypeMatch (DetectEngineThreadCtx *det_ctx, SCReturnInt(0); } -/** - * \internal - * \brief Function to parse options passed via rfb.sectype keywords. - * - * \param rawstr Pointer to the user provided options. - * - * \retval dd pointer to DetectU32Data on success. - * \retval NULL on failure. 
- */ -static DetectU32Data *DetectRfbSectypeParse(const char *rawstr) -{ - return DetectU32Parse(rawstr); -} - /** * \brief Function to add the parsed RFB security type field into the current signature. * @@ -119,10 +105,10 @@ static int DetectRfbSectypeSetup (DetectEngineCtx *de_ctx, Signature *s, const c if (DetectSignatureSetAppProto(s, ALPROTO_RFB) != 0) return -1; - DetectU32Data *dd = DetectRfbSectypeParse(rawstr); + DetectU32Data *dd = DetectU32Parse(rawstr); if (dd == NULL) { SCLogError("Parsing \'%s\' failed", rawstr); - goto error; + return -1; } /* okay so far so good, lets get this into a SigMatch diff --git a/src/detect-snmp-version.c b/src/detect-snmp-version.c index 57359c091bd6..deb73b9bcd0d 100644 --- a/src/detect-snmp-version.c +++ b/src/detect-snmp-version.c @@ -140,7 +140,7 @@ static int DetectSNMPVersionSetup (DetectEngineCtx *de_ctx, Signature *s, dd = DetectSNMPVersionParse(rawstr); if (dd == NULL) { SCLogError("Parsing \'%s\' failed", rawstr); - goto error; + return -1; } /* okay so far so good, lets get this into a SigMatch diff --git a/src/detect-stream_size.c b/src/detect-stream_size.c index 86aabd77c8e8..50cd15af2d35 100644 --- a/src/detect-stream_size.c +++ b/src/detect-stream_size.c @@ -216,7 +216,7 @@ static bool PrefilterPacketStreamSizeCompare(PrefilterPacketHeaderValue v, void static int PrefilterSetupStreamSize(DetectEngineCtx *de_ctx, SigGroupHead *sgh) { - return PrefilterSetupPacketHeader(de_ctx, sgh, DETECT_TCPMSS, PrefilterPacketStreamSizeSet, + return PrefilterSetupPacketHeader(de_ctx, sgh, DETECT_STREAM_SIZE, PrefilterPacketStreamSizeSet, PrefilterPacketStreamSizeCompare, PrefilterPacketStreamsizeMatch); } diff --git a/src/detect-tls-certs.c b/src/detect-tls-certs.c index a0204377373e..f233779b12d9 100644 --- a/src/detect-tls-certs.c +++ b/src/detect-tls-certs.c @@ -70,6 +70,7 @@ static int g_tls_certs_buffer_id = 0; struct TlsCertsGetDataArgs { uint32_t local_id; /**< used as index into thread inspect array */ 
SSLCertsChain *cert; + const uint8_t flags; }; typedef struct PrefilterMpmTlsCerts { @@ -150,7 +151,7 @@ static InspectionBuffer *TlsCertsGetData(DetectEngineThreadCtx *det_ctx, const SSLState *ssl_state = (SSLState *)f->alstate; const SSLStateConnp *connp; - if (f->flags & STREAM_TOSERVER) { + if (cbdata->flags & STREAM_TOSERVER) { connp = &ssl_state->client_connp; } else { connp = &ssl_state->server_connp; @@ -185,7 +186,7 @@ static uint8_t DetectEngineInspectTlsCerts(DetectEngineCtx *de_ctx, DetectEngine transforms = engine->v2.transforms; } - struct TlsCertsGetDataArgs cbdata = { 0, NULL }; + struct TlsCertsGetDataArgs cbdata = { .local_id = 0, .cert = NULL, .flags = flags }; while (1) { @@ -222,7 +223,7 @@ static void PrefilterTxTlsCerts(DetectEngineThreadCtx *det_ctx, const void *pect const MpmCtx *mpm_ctx = ctx->mpm_ctx; const int list_id = ctx->list_id; - struct TlsCertsGetDataArgs cbdata = { 0, NULL }; + struct TlsCertsGetDataArgs cbdata = { .local_id = 0, .cert = NULL, .flags = flags }; while (1) { diff --git a/src/detect-tls-ja3-hash.c b/src/detect-tls-ja3-hash.c index 7660fde4c2a0..2270896c915d 100644 --- a/src/detect-tls-ja3-hash.c +++ b/src/detect-tls-ja3-hash.c @@ -56,6 +56,14 @@ #include "util-unittest.h" #include "util-unittest-helper.h" +#ifndef HAVE_JA3 +static int DetectJA3SetupNoSupport(DetectEngineCtx *a, Signature *b, const char *c) +{ + SCLogError("no JA3 support built in"); + return -1; +} +#endif + static int DetectTlsJa3HashSetup(DetectEngineCtx *, Signature *, const char *); static InspectionBuffer *GetData(DetectEngineThreadCtx *det_ctx, const DetectEngineTransforms *transforms, @@ -76,10 +84,15 @@ void DetectTlsJa3HashRegister(void) sigmatch_table[DETECT_AL_TLS_JA3_HASH].alias = "ja3_hash"; sigmatch_table[DETECT_AL_TLS_JA3_HASH].desc = "sticky buffer to match the JA3 hash buffer"; sigmatch_table[DETECT_AL_TLS_JA3_HASH].url = "/rules/ja3-keywords.html#ja3-hash"; +#ifdef HAVE_JA3 sigmatch_table[DETECT_AL_TLS_JA3_HASH].Setup = 
DetectTlsJa3HashSetup; +#else /* HAVE_JA3 */ + sigmatch_table[DETECT_AL_TLS_JA3_HASH].Setup = DetectJA3SetupNoSupport; +#endif /* HAVE_JA3 */ sigmatch_table[DETECT_AL_TLS_JA3_HASH].flags |= SIGMATCH_NOOPT; sigmatch_table[DETECT_AL_TLS_JA3_HASH].flags |= SIGMATCH_INFO_STICKY_BUFFER; +#ifdef HAVE_JA3 DetectAppLayerInspectEngineRegister2("ja3.hash", ALPROTO_TLS, SIG_FLAG_TOSERVER, 0, DetectEngineInspectBufferGeneric, GetData); @@ -101,6 +114,7 @@ void DetectTlsJa3HashRegister(void) DetectTlsJa3HashValidateCallback); g_tls_ja3_hash_buffer_id = DetectBufferTypeGetByName("ja3.hash"); +#endif /* HAVE_JA3 */ } /** @@ -134,7 +148,7 @@ static int DetectTlsJa3HashSetup(DetectEngineCtx *de_ctx, Signature *s, const ch } return -2; } - s->init_data->init_flags |= SIG_FLAG_INIT_JA3; + s->init_data->init_flags |= SIG_FLAG_INIT_JA; return 0; } diff --git a/src/detect-tls-ja3-string.c b/src/detect-tls-ja3-string.c index 87a61bfd8738..0b0050f9d66f 100644 --- a/src/detect-tls-ja3-string.c +++ b/src/detect-tls-ja3-string.c @@ -56,6 +56,14 @@ #include "util-unittest.h" #include "util-unittest-helper.h" +#ifndef HAVE_JA3 +static int DetectJA3SetupNoSupport(DetectEngineCtx *a, Signature *b, const char *c) +{ + SCLogError("no JA3 support built in"); + return -1; +} +#endif /* HAVE_JA3 */ + static int DetectTlsJa3StringSetup(DetectEngineCtx *, Signature *, const char *); static InspectionBuffer *GetData(DetectEngineThreadCtx *det_ctx, const DetectEngineTransforms *transforms, @@ -72,10 +80,15 @@ void DetectTlsJa3StringRegister(void) sigmatch_table[DETECT_AL_TLS_JA3_STRING].alias = "ja3_string"; sigmatch_table[DETECT_AL_TLS_JA3_STRING].desc = "sticky buffer to match the JA3 string buffer"; sigmatch_table[DETECT_AL_TLS_JA3_STRING].url = "/rules/ja3-keywords.html#ja3-string"; +#ifdef HAVE_JA3 sigmatch_table[DETECT_AL_TLS_JA3_STRING].Setup = DetectTlsJa3StringSetup; +#else /* HAVE_JA3 */ + sigmatch_table[DETECT_AL_TLS_JA3_STRING].Setup = DetectJA3SetupNoSupport; +#endif /* HAVE_JA3 */ 
sigmatch_table[DETECT_AL_TLS_JA3_STRING].flags |= SIGMATCH_NOOPT; sigmatch_table[DETECT_AL_TLS_JA3_STRING].flags |= SIGMATCH_INFO_STICKY_BUFFER; +#ifdef HAVE_JA3 DetectAppLayerInspectEngineRegister2("ja3.string", ALPROTO_TLS, SIG_FLAG_TOSERVER, 0, DetectEngineInspectBufferGeneric, GetData); @@ -91,6 +104,7 @@ void DetectTlsJa3StringRegister(void) DetectBufferTypeSetDescriptionByName("ja3.string", "TLS JA3 string"); g_tls_ja3_str_buffer_id = DetectBufferTypeGetByName("ja3.string"); +#endif /* HAVE_JA3 */ } /** @@ -123,7 +137,7 @@ static int DetectTlsJa3StringSetup(DetectEngineCtx *de_ctx, Signature *s, const } return -2; } - s->init_data->init_flags |= SIG_FLAG_INIT_JA3; + s->init_data->init_flags |= SIG_FLAG_INIT_JA; return 0; } diff --git a/src/detect-tls-ja3s-hash.c b/src/detect-tls-ja3s-hash.c index 583566012d08..cecc98a2c1b2 100644 --- a/src/detect-tls-ja3s-hash.c +++ b/src/detect-tls-ja3s-hash.c @@ -56,6 +56,14 @@ #include "util-unittest.h" #include "util-unittest-helper.h" +#ifndef HAVE_JA3 +static int DetectJA3SetupNoSupport(DetectEngineCtx *a, Signature *b, const char *c) +{ + SCLogError("no JA3 support built in"); + return -1; +} +#endif /* HAVE_JA3 */ + static int DetectTlsJa3SHashSetup(DetectEngineCtx *, Signature *, const char *); static InspectionBuffer *GetData(DetectEngineThreadCtx *det_ctx, const DetectEngineTransforms *transforms, @@ -75,10 +83,15 @@ void DetectTlsJa3SHashRegister(void) sigmatch_table[DETECT_AL_TLS_JA3S_HASH].name = "ja3s.hash"; sigmatch_table[DETECT_AL_TLS_JA3S_HASH].desc = "sticky buffer to match the JA3S hash buffer"; sigmatch_table[DETECT_AL_TLS_JA3S_HASH].url = "/rules/ja3-keywords.html#ja3s-hash"; +#ifdef HAVE_JA3 sigmatch_table[DETECT_AL_TLS_JA3S_HASH].Setup = DetectTlsJa3SHashSetup; +#else /* HAVE_JA3 */ + sigmatch_table[DETECT_AL_TLS_JA3S_HASH].Setup = DetectJA3SetupNoSupport; +#endif /* HAVE_JA3 */ sigmatch_table[DETECT_AL_TLS_JA3S_HASH].flags |= SIGMATCH_NOOPT; sigmatch_table[DETECT_AL_TLS_JA3S_HASH].flags |= 
SIGMATCH_INFO_STICKY_BUFFER; +#ifdef HAVE_JA3 DetectAppLayerInspectEngineRegister2("ja3s.hash", ALPROTO_TLS, SIG_FLAG_TOCLIENT, 0, DetectEngineInspectBufferGeneric, GetData); @@ -100,6 +113,7 @@ void DetectTlsJa3SHashRegister(void) DetectTlsJa3SHashValidateCallback); g_tls_ja3s_hash_buffer_id = DetectBufferTypeGetByName("ja3s.hash"); +#endif /* HAVE_JA3 */ } /** @@ -132,7 +146,7 @@ static int DetectTlsJa3SHashSetup(DetectEngineCtx *de_ctx, Signature *s, const c } return -2; } - s->init_data->init_flags |= SIG_FLAG_INIT_JA3; + s->init_data->init_flags |= SIG_FLAG_INIT_JA; return 0; } diff --git a/src/detect-tls-ja3s-string.c b/src/detect-tls-ja3s-string.c index 0f7f7d61d067..d1d8e243c6c7 100644 --- a/src/detect-tls-ja3s-string.c +++ b/src/detect-tls-ja3s-string.c @@ -56,6 +56,14 @@ #include "util-unittest.h" #include "util-unittest-helper.h" +#ifndef HAVE_JA3 +static int DetectJA3SetupNoSupport(DetectEngineCtx *a, Signature *b, const char *c) +{ + SCLogError("no JA3 support built in"); + return -1; +} +#endif /* HAVE_JA3 */ + static int DetectTlsJa3SStringSetup(DetectEngineCtx *, Signature *, const char *); static InspectionBuffer *GetData(DetectEngineThreadCtx *det_ctx, const DetectEngineTransforms *transforms, @@ -72,10 +80,15 @@ void DetectTlsJa3SStringRegister(void) sigmatch_table[DETECT_AL_TLS_JA3S_STRING].desc = "sticky buffer to match the JA3S string buffer"; sigmatch_table[DETECT_AL_TLS_JA3S_STRING].url = "/rules/ja3-keywords.html#ja3s-string"; +#ifdef HAVE_JA3 sigmatch_table[DETECT_AL_TLS_JA3S_STRING].Setup = DetectTlsJa3SStringSetup; +#else /* HAVE_JA3 */ + sigmatch_table[DETECT_AL_TLS_JA3S_STRING].Setup = DetectJA3SetupNoSupport; +#endif /* HAVE_JA3 */ sigmatch_table[DETECT_AL_TLS_JA3S_STRING].flags |= SIGMATCH_NOOPT; sigmatch_table[DETECT_AL_TLS_JA3S_STRING].flags |= SIGMATCH_INFO_STICKY_BUFFER; +#ifdef HAVE_JA3 DetectAppLayerInspectEngineRegister2("ja3s.string", ALPROTO_TLS, SIG_FLAG_TOCLIENT, 0, DetectEngineInspectBufferGeneric, GetData); @@ -91,6 
+104,7 @@ void DetectTlsJa3SStringRegister(void) DetectBufferTypeSetDescriptionByName("ja3s.string", "TLS JA3S string"); g_tls_ja3s_str_buffer_id = DetectBufferTypeGetByName("ja3s.string"); +#endif /* HAVE_JA3 */ } /** @@ -123,7 +137,7 @@ static int DetectTlsJa3SStringSetup(DetectEngineCtx *de_ctx, Signature *s, const } return -2; } - s->init_data->init_flags |= SIG_FLAG_INIT_JA3; + s->init_data->init_flags |= SIG_FLAG_INIT_JA; return 0; } diff --git a/src/detect-tls-random.c b/src/detect-tls-random.c index fc4369ab1861..cf1a22bbe542 100644 --- a/src/detect-tls-random.c +++ b/src/detect-tls-random.c @@ -218,9 +218,9 @@ static InspectionBuffer *GetRandomTimeData(DetectEngineThreadCtx *det_ctx, const uint32_t data_len = DETECT_TLS_RANDOM_TIME_LEN; const uint8_t *data; if (flow_flags & STREAM_TOSERVER) { - data = ssl_state->server_connp.random; - } else { data = ssl_state->client_connp.random; + } else { + data = ssl_state->server_connp.random; } InspectionBufferSetup(det_ctx, list_id, buffer, data, data_len); InspectionBufferApplyTransforms(buffer, transforms); @@ -245,9 +245,9 @@ static InspectionBuffer *GetRandomBytesData(DetectEngineThreadCtx *det_ctx, const uint32_t data_len = DETECT_TLS_RANDOM_BYTES_LEN; const uint8_t *data; if (flow_flags & STREAM_TOSERVER) { - data = ssl_state->server_connp.random + DETECT_TLS_RANDOM_TIME_LEN; - } else { data = ssl_state->client_connp.random + DETECT_TLS_RANDOM_TIME_LEN; + } else { + data = ssl_state->server_connp.random + DETECT_TLS_RANDOM_TIME_LEN; } InspectionBufferSetup(det_ctx, list_id, buffer, data, data_len); InspectionBufferApplyTransforms(buffer, transforms); @@ -272,9 +272,9 @@ static InspectionBuffer *GetRandomData(DetectEngineThreadCtx *det_ctx, const uint32_t data_len = TLS_RANDOM_LEN; const uint8_t *data; if (flow_flags & STREAM_TOSERVER) { - data = ssl_state->server_connp.random; - } else { data = ssl_state->client_connp.random; + } else { + data = ssl_state->server_connp.random; } InspectionBufferSetup(det_ctx, 
list_id, buffer, data, data_len); InspectionBufferApplyTransforms(buffer, transforms); diff --git a/src/detect-tls.c b/src/detect-tls.c index 71e45696cd9c..3e81b87790b5 100644 --- a/src/detect-tls.c +++ b/src/detect-tls.c @@ -627,7 +627,7 @@ static int DetectTlsStorePostMatch (DetectEngineThreadCtx *det_ctx, SSLStateConnp *connp; - if (p->flow->flags & STREAM_TOSERVER) { + if (PKT_IS_TOSERVER(p)) { connp = &ssl_state->client_connp; } else { connp = &ssl_state->server_connp; diff --git a/src/detect-transform-casechange.c b/src/detect-transform-casechange.c new file mode 100644 index 000000000000..851030828ced --- /dev/null +++ b/src/detect-transform-casechange.c @@ -0,0 +1,169 @@ +/* Copyright (C) 2023 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +/** + * \file + * + * \author Jeff Lucovsky + * + * Implements case changing transforms + */ + +#include "suricata-common.h" +#include "detect.h" +#include "detect-engine.h" +#include "detect-parse.h" +#include "detect-transform-casechange.h" + +/** + * \internal + * \brief Register the to_lowercase transform + * \param det_ctx detection engine ctx + * \param s signature + * \param optstr options string + * \retval 0 ok + * \retval -1 failure + */ +static int DetectTransformToLowerSetup(DetectEngineCtx *de_ctx, Signature *s, const char *optstr) +{ + SCEnter(); + + int r = DetectSignatureAddTransform(s, DETECT_TRANSFORM_TOLOWER, NULL); + + SCReturnInt(r); +} + +/** + * \internal + * \brief Apply the to_lowercase keyword to the last pattern match + * \param buffer Inspection buffer + * \param optstr options string + */ +static void DetectTransformToLower(InspectionBuffer *buffer, void *options) +{ + const uint8_t *input = buffer->inspect; + const uint32_t input_len = buffer->inspect_len; + + if (input_len == 0) { + return; + } + + uint8_t output[input_len]; + for (uint32_t i = 0; i < input_len; i++) { + output[i] = u8_tolower(input[i]); + } + + InspectionBufferCopy(buffer, output, input_len); +} +/** + * \internal + * \brief Register the to_uppercase transform + * \param det_ctx detection engine ctx + * \param s signature + * \param optstr options string + * \retval 0 ok + * \retval -1 failure + */ +static int DetectTransformToUpperSetup(DetectEngineCtx *de_ctx, Signature *s, const char *optstr) +{ + SCEnter(); + + int r = DetectSignatureAddTransform(s, DETECT_TRANSFORM_TOUPPER, NULL); + + SCReturnInt(r); +} + +/** + * \internal + * \brief Apply the to_uppercase keyword to the last pattern match + * \param buffer Inspection buffer + * \param optstr options string + */ +static void DetectTransformToUpper(InspectionBuffer *buffer, void *options) +{ + const uint8_t *input = buffer->inspect; + const uint32_t input_len = buffer->inspect_len; + + if (input_len == 
0) { + return; + } + + uint8_t output[input_len]; + for (uint32_t i = 0; i < input_len; i++) { + output[i] = u8_toupper(input[i]); + } + + InspectionBufferCopy(buffer, output, input_len); +} + +/* + * \internal + * \brief Check if content is compatible with transform + * + * If the content contains any lowercase characters, then it is not compatible. + */ +static bool TransformToUpperValidate(const uint8_t *content, uint16_t content_len, void *options) +{ + if (content) { + for (uint32_t i = 0; i < content_len; i++) { + if (islower(*content++)) { + return false; + } + } + } + return true; +} + +/* + * \internal + * \brief Check if content is compatible with transform + * + * If the content contains any uppercase characters, then it is not compatible. + */ +static bool TransformToLowerValidate(const uint8_t *content, uint16_t content_len, void *options) +{ + if (content) { + for (uint32_t i = 0; i < content_len; i++) { + if (isupper(*content++)) { + return false; + } + } + } + return true; +} + +void DetectTransformToUpperRegister(void) +{ + sigmatch_table[DETECT_TRANSFORM_TOUPPER].name = "to_uppercase"; + sigmatch_table[DETECT_TRANSFORM_TOUPPER].desc = "convert buffer to uppercase"; + sigmatch_table[DETECT_TRANSFORM_TOUPPER].url = "/rules/transforms.html#to_uppercase"; + sigmatch_table[DETECT_TRANSFORM_TOUPPER].Transform = DetectTransformToUpper; + sigmatch_table[DETECT_TRANSFORM_TOUPPER].TransformValidate = TransformToUpperValidate; + sigmatch_table[DETECT_TRANSFORM_TOUPPER].Setup = DetectTransformToUpperSetup; + sigmatch_table[DETECT_TRANSFORM_TOUPPER].flags |= SIGMATCH_NOOPT; +} + +void DetectTransformToLowerRegister(void) +{ + sigmatch_table[DETECT_TRANSFORM_TOLOWER].name = "to_lowercase"; + sigmatch_table[DETECT_TRANSFORM_TOLOWER].desc = "convert buffer to lowercase"; + sigmatch_table[DETECT_TRANSFORM_TOLOWER].url = "/rules/transforms.html#to_lowercase"; + sigmatch_table[DETECT_TRANSFORM_TOLOWER].Transform = DetectTransformToLower; + 
sigmatch_table[DETECT_TRANSFORM_TOLOWER].TransformValidate = TransformToLowerValidate; + sigmatch_table[DETECT_TRANSFORM_TOLOWER].Setup = DetectTransformToLowerSetup; + sigmatch_table[DETECT_TRANSFORM_TOLOWER].flags |= SIGMATCH_NOOPT; +} diff --git a/src/detect-transform-casechange.h b/src/detect-transform-casechange.h new file mode 100644 index 000000000000..db6275b915f7 --- /dev/null +++ b/src/detect-transform-casechange.h @@ -0,0 +1,31 @@ +/* Copyright (C) 2023 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +/** + * \file + * + * \author Jeff Lucovsky + */ + +#ifndef __DETECT_TRANSFORM_CASECHANGE_H +#define __DETECT_TRANSFORM_CASECHANGE_H + +/* prototypes */ +void DetectTransformToLowerRegister(void); +void DetectTransformToUpperRegister(void); + +#endif /* __DETECT_TRANSFORM_CASECHANGE_H */ diff --git a/src/detect-transform-header-lowercase.c b/src/detect-transform-header-lowercase.c new file mode 100644 index 000000000000..7c776201b308 --- /dev/null +++ b/src/detect-transform-header-lowercase.c @@ -0,0 +1,88 @@ +/* Copyright (C) 2023 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +/** + * \file + * + * \author Philippe Antoine + * + * Implements the header_lowercase transform keyword with option support + */ + +#include "suricata-common.h" +#include "detect.h" +#include "detect-engine.h" +#include "detect-parse.h" +#include "detect-transform-header-lowercase.h" + +/** + * \internal + * \brief Apply the header_lowercase keyword to the last pattern match + * \param det_ctx detection engine ctx + * \param s signature + * \param optstr options string + * \retval 0 ok + * \retval -1 failure + */ +static int DetectTransformHeaderLowercaseSetup( + DetectEngineCtx *de_ctx, Signature *s, const char *optstr) +{ + SCEnter(); + int r = DetectSignatureAddTransform(s, DETECT_TRANSFORM_HEADER_LOWERCASE, NULL); + SCReturnInt(r); +} + +static void DetectTransformHeaderLowercase(InspectionBuffer *buffer, void *options) +{ + const uint8_t *input = buffer->inspect; + const uint32_t input_len = buffer->inspect_len; + if (input_len == 0) { + return; + } + uint8_t output[input_len]; + + // state 0 is header name, 1 is header value + int state = 0; + for (uint32_t i = 0; i < input_len; i++) { + if (state == 0) { + if (input[i] == ':') { + output[i] = input[i]; + state = 1; + } else { + output[i] = u8_tolower(input[i]); + } + } else { + output[i] = input[i]; + if (input[i] == '\n') { + state = 0; + } + } + } + InspectionBufferCopy(buffer, output, input_len); +} + +void DetectTransformHeaderLowercaseRegister(void) +{ + sigmatch_table[DETECT_TRANSFORM_HEADER_LOWERCASE].name = 
"header_lowercase"; + sigmatch_table[DETECT_TRANSFORM_HEADER_LOWERCASE].desc = + "modify buffer via lowercaseing header names"; + sigmatch_table[DETECT_TRANSFORM_HEADER_LOWERCASE].url = + "/rules/transforms.html#header_lowercase"; + sigmatch_table[DETECT_TRANSFORM_HEADER_LOWERCASE].Transform = DetectTransformHeaderLowercase; + sigmatch_table[DETECT_TRANSFORM_HEADER_LOWERCASE].Setup = DetectTransformHeaderLowercaseSetup; + sigmatch_table[DETECT_TRANSFORM_HEADER_LOWERCASE].flags |= SIGMATCH_NOOPT; +} diff --git a/src/detect-transform-header-lowercase.h b/src/detect-transform-header-lowercase.h new file mode 100644 index 000000000000..aca7f874a499 --- /dev/null +++ b/src/detect-transform-header-lowercase.h @@ -0,0 +1,30 @@ +/* Copyright (C) 2023 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +/** + * \file + * + * \author Philippe Antoine + */ + +#ifndef __DETECT_TRANSFORM_HEADER_LOWERCASE_H__ +#define __DETECT_TRANSFORM_HEADER_LOWERCASE_H__ + +/* prototypes */ +void DetectTransformHeaderLowercaseRegister(void); + +#endif /* __DETECT_TRANSFORM_HEADER_LOWERCASE_H__ */ diff --git a/src/detect-transform-strip-pseudo-headers.c b/src/detect-transform-strip-pseudo-headers.c new file mode 100644 index 000000000000..450900d46037 --- /dev/null +++ b/src/detect-transform-strip-pseudo-headers.c @@ -0,0 +1,100 @@ +/* Copyright (C) 2023 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +/** + * \file + * + * \author Philippe Antoine + * + * Implements the strip_pseudo_headers transform keyword with option support + */ + +#include "suricata-common.h" +#include "detect.h" +#include "detect-engine.h" +#include "detect-parse.h" +#include "detect-transform-strip-pseudo-headers.h" + +/** + * \internal + * \brief Apply the strip_pseudo_headers keyword to the last pattern match + * \param det_ctx detection engine ctx + * \param s signature + * \param optstr options string + * \retval 0 ok + * \retval -1 failure + */ +static int DetectTransformStripPseudoHeadersSetup( + DetectEngineCtx *de_ctx, Signature *s, const char *optstr) +{ + SCEnter(); + int r = DetectSignatureAddTransform(s, DETECT_TRANSFORM_STRIP_PSEUDO_HEADERS, NULL); + SCReturnInt(r); +} + +static void DetectTransformStripPseudoHeaders(InspectionBuffer *buffer, void *options) +{ + const uint8_t *input = buffer->inspect; + const uint32_t input_len = buffer->inspect_len; + if (input_len == 0) { + return; + } + uint8_t output[input_len]; + + bool new_line = true; + bool pseudo = false; + uint32_t j = 0; + for (uint32_t i = 0; i < input_len; i++) { + if (new_line) { + if (input[i] == ':') { + pseudo = true; + } + if (input[i] != '\r' && input[i] != '\n') { + new_line = false; + } + } else { + if (input[i] == '\n') { + new_line = true; + if (!pseudo) { + output[j] = input[i]; + j++; + } + pseudo = false; + continue; + } + } + if (!pseudo) { + output[j] = input[i]; + j++; + } + } + InspectionBufferCopy(buffer, output, j); +} + +void DetectTransformStripPseudoHeadersRegister(void) +{ + sigmatch_table[DETECT_TRANSFORM_STRIP_PSEUDO_HEADERS].name = "strip_pseudo_headers"; + sigmatch_table[DETECT_TRANSFORM_STRIP_PSEUDO_HEADERS].desc = + "modify buffer via stripping pseudo headers"; + sigmatch_table[DETECT_TRANSFORM_STRIP_PSEUDO_HEADERS].url = + "/rules/transforms.html#strip_pseudo_headers"; + sigmatch_table[DETECT_TRANSFORM_STRIP_PSEUDO_HEADERS].Transform = + DetectTransformStripPseudoHeaders; + 
sigmatch_table[DETECT_TRANSFORM_STRIP_PSEUDO_HEADERS].Setup = + DetectTransformStripPseudoHeadersSetup; + sigmatch_table[DETECT_TRANSFORM_STRIP_PSEUDO_HEADERS].flags |= SIGMATCH_NOOPT; +} diff --git a/src/detect-transform-strip-pseudo-headers.h b/src/detect-transform-strip-pseudo-headers.h new file mode 100644 index 000000000000..c2016d438f04 --- /dev/null +++ b/src/detect-transform-strip-pseudo-headers.h @@ -0,0 +1,30 @@ +/* Copyright (C) 2023 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +/** + * \file + * + * \author Philippe Antoine + */ + +#ifndef __DETECT_TRANSFORM_STRIP_PSEUDOHEADERS_H__ +#define __DETECT_TRANSFORM_STRIP_PSEUDOHEADERS_H__ + +/* prototypes */ +void DetectTransformStripPseudoHeadersRegister(void); + +#endif /* __DETECT_TRANSFORM_STRIP_PSEUDOHEADERS_H__ */ diff --git a/src/detect-xbits.c b/src/detect-xbits.c index 4fae4414819f..f6a25663dfa3 100644 --- a/src/detect-xbits.c +++ b/src/detect-xbits.c @@ -25,6 +25,7 @@ #include "suricata-common.h" #include "decode.h" +#include "action-globals.h" #include "detect.h" #include "threads.h" #include "flow.h" @@ -341,9 +342,9 @@ int DetectXbitSetup (DetectEngineCtx *de_ctx, Signature *s, const char *rawstr) int result = DetectXbitParse(de_ctx, rawstr, &cd); if (result < 0) { return -1; - /* noalert doesn't use a cd/sm struct. It flags the sig. We're done. 
*/ - } else if (result == 0 && cd == NULL) { - s->flags |= SIG_FLAG_NOALERT; + } else if (cd == NULL) { + /* noalert doesn't use a cd/sm struct. It flags the sig. We're done. */ + s->action &= ~ACTION_ALERT; return 0; } diff --git a/src/detect.c b/src/detect.c index 5cb4e6bfbc44..ac1f13906e8d 100644 --- a/src/detect.c +++ b/src/detect.c @@ -152,6 +152,12 @@ static void DetectRun(ThreadVars *th_v, DetectRunFrames(th_v, de_ctx, det_ctx, p, pflow, &scratch); // PACKET_PROFILING_DETECT_END(p, PROF_DETECT_TX); } + // no update to transactions + if (!PKT_IS_PSEUDOPKT(p) && p->app_update_direction == 0 && + ((PKT_IS_TOSERVER(p) && (p->flow->flags & FLOW_TS_APP_UPDATED) == 0) || + (PKT_IS_TOCLIENT(p) && (p->flow->flags & FLOW_TC_APP_UPDATED) == 0))) { + goto end; + } } else if (p->proto == IPPROTO_UDP) { DetectRunFrames(th_v, de_ctx, det_ctx, p, pflow, &scratch); } @@ -159,6 +165,11 @@ static void DetectRun(ThreadVars *th_v, PACKET_PROFILING_DETECT_START(p, PROF_DETECT_TX); DetectRunTx(th_v, de_ctx, det_ctx, p, pflow, &scratch); PACKET_PROFILING_DETECT_END(p, PROF_DETECT_TX); + /* see if we need to increment the inspect_id and reset the de_state */ + PACKET_PROFILING_DETECT_START(p, PROF_DETECT_TX_UPDATE); + AppLayerParserSetTransactionInspectId( + pflow, pflow->alparser, pflow->alstate, scratch.flow_flags, (scratch.sgh == NULL)); + PACKET_PROFILING_DETECT_END(p, PROF_DETECT_TX_UPDATE); } end: @@ -802,7 +813,20 @@ static inline void DetectRulePacketRules( #endif DetectRunPostMatch(tv, det_ctx, p, s); - AlertQueueAppend(det_ctx, s, p, 0, alert_flags); + uint64_t txid = PACKET_ALERT_NOTX; + if ((alert_flags & PACKET_ALERT_FLAG_STREAM_MATCH) || + (s->alproto != ALPROTO_UNKNOWN && pflow->proto == IPPROTO_UDP)) { + // if there is a stream match (TCP), or + // a UDP specific app-layer signature, + // try to use the good tx for the packet direction + if (pflow->alstate) { + uint8_t dir = + (p->flowflags & FLOW_PKT_TOCLIENT) ? 
STREAM_TOCLIENT : STREAM_TOSERVER; + txid = AppLayerParserGetTransactionInspectId(pflow->alparser, dir); + alert_flags |= PACKET_ALERT_FLAG_TX; + } + } + AlertQueueAppend(det_ctx, s, p, txid, alert_flags); next: DetectVarProcessList(det_ctx, pflow, p); DetectReplaceFree(det_ctx); @@ -919,14 +943,6 @@ static inline void DetectRunPostRules( Flow * const pflow, DetectRunScratchpad *scratch) { - /* see if we need to increment the inspect_id and reset the de_state */ - if (pflow && pflow->alstate) { - PACKET_PROFILING_DETECT_START(p, PROF_DETECT_TX_UPDATE); - AppLayerParserSetTransactionInspectId(pflow, pflow->alparser, pflow->alstate, - scratch->flow_flags, (scratch->sgh == NULL)); - PACKET_PROFILING_DETECT_END(p, PROF_DETECT_TX_UPDATE); - } - /* so now let's iterate the alerts and remove the ones after a pass rule * matched (if any). This is done inside PacketAlertFinalize() */ /* PR: installed "tag" keywords are handled after the threshold inspection */ @@ -1301,6 +1317,81 @@ static inline void StoreDetectFlags(DetectTransaction *tx, const uint8_t flow_fl } } +// Merge 'state' rules from the regular prefilter +// updates array_idx on the way +static inline void RuleMatchCandidateMergeStateRules( + DetectEngineThreadCtx *det_ctx, uint32_t *array_idx) +{ + // Now, we will merge 2 sorted lists : + // the one in det_ctx->tx_candidates + // and the one in det_ctx->match_array + // For match_array, we take only the relevant elements where s->app_inspect != NULL + + // Basically, we iterate at the same time over the 2 lists + // comparing and taking an element from either. + + // Trick is to do so in place in det_ctx->tx_candidates, + // so as to minimize the number of moves in det_ctx->tx_candidates. + // For this, the algorithm traverses the lists in reverse order. 
+ // Otherwise, if the first element of match_array was to be put before + // all tx_candidates, we would need to shift all tx_candidates + + // Retain the number of elements sorted in tx_candidates before merge + uint32_t j = *array_idx; + // First loop only counting the number of elements to add + for (uint32_t i = 0; i < det_ctx->match_array_cnt; i++) { + const Signature *s = det_ctx->match_array[i]; + if (s->app_inspect != NULL) { + (*array_idx)++; + } + } + // Future number of elements in tx_candidates after merge + uint32_t k = *array_idx; + + if (k == j) { + // no new element from match_array to merge in tx_candidates + return; + } + + // variable i is for all elements of match_array (even not relevant ones) + // variable j is for elements of tx_candidates before merge + // variable k is for elements of tx_candidates after merge + for (uint32_t i = det_ctx->match_array_cnt; i > 0;) { + const Signature *s = det_ctx->match_array[i - 1]; + if (s->app_inspect == NULL) { + // no relevant element, get the next one from match_array + i--; + continue; + } + // we have one element from match_array to merge in tx_candidates + k--; + if (j > 0) { + // j > 0 means there is still at least one element in tx_candidates to merge + const RuleMatchCandidateTx *s0 = &det_ctx->tx_candidates[j - 1]; + if (s->num <= s0->id) { + // get next element from previous tx_candidates + j--; + // take the element from tx_candidates before merge + det_ctx->tx_candidates[k].s = det_ctx->tx_candidates[j].s; + det_ctx->tx_candidates[k].id = det_ctx->tx_candidates[j].id; + det_ctx->tx_candidates[k].flags = det_ctx->tx_candidates[j].flags; + det_ctx->tx_candidates[k].stream_reset = det_ctx->tx_candidates[j].stream_reset; + continue; + } + } // otherwise + // get next element from match_array + i--; + // take the element from match_array + det_ctx->tx_candidates[k].s = s; + det_ctx->tx_candidates[k].id = s->num; + det_ctx->tx_candidates[k].flags = NULL; + det_ctx->tx_candidates[k].stream_reset = 
0; + } + // Even if k > 0 or j > 0, the loop is over. (Note that j == k now) + // The remaining elements in tx_candidates up to k were already sorted + // and come before any other element later in the list +} + static void DetectRunTx(ThreadVars *tv, DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx, @@ -1374,24 +1465,10 @@ static void DetectRunTx(ThreadVars *tv, } /* merge 'state' rules from the regular prefilter */ +#ifdef PROFILING uint32_t x = array_idx; - for (uint32_t i = 0; i < det_ctx->match_array_cnt; i++) { - const Signature *s = det_ctx->match_array[i]; - if (s->app_inspect != NULL) { - const SigIntId id = s->num; - det_ctx->tx_candidates[array_idx].s = s; - det_ctx->tx_candidates[array_idx].id = id; - det_ctx->tx_candidates[array_idx].flags = NULL; - det_ctx->tx_candidates[array_idx].stream_reset = 0; - array_idx++; - - SCLogDebug("%p/%"PRIu64" rule %u (%u) added from 'match' list", - tx.tx_ptr, tx.tx_id, s->id, id); - } - } - do_sort = (array_idx > x); // sort if match added anything - SCLogDebug("%p/%" PRIu64 " rules added from 'match' list: %u", tx.tx_ptr, tx.tx_id, - array_idx - x); +#endif + RuleMatchCandidateMergeStateRules(det_ctx, &array_idx); /* merge stored state into results */ if (tx.de_state != NULL) { @@ -1650,12 +1727,14 @@ static void DetectRunFrames(ThreadVars *tv, DetectEngineCtx *de_ctx, DetectEngin /* match */ DetectRunPostMatch(tv, det_ctx, p, s); - const uint8_t alert_flags = - (PACKET_ALERT_FLAG_STATE_MATCH | PACKET_ALERT_FLAG_FRAME); + uint8_t alert_flags = (PACKET_ALERT_FLAG_STATE_MATCH | PACKET_ALERT_FLAG_FRAME); det_ctx->flags |= DETECT_ENGINE_THREAD_CTX_FRAME_ID_SET; det_ctx->frame_id = frame->id; SCLogDebug( "%p/%" PRIi64 " sig %u (%u) matched", frame, frame->id, s->id, s->num); + if (frame->flags & FRAME_FLAG_TX_ID_SET) { + alert_flags |= PACKET_ALERT_FLAG_TX; + } AlertQueueAppend(det_ctx, s, p, frame->tx_id, alert_flags); } } @@ -1797,9 +1876,11 @@ TmEcode Detect(ThreadVars *tv, Packet *p, void *data) #ifdef 
PROFILE_RULES /* aggregate statistics */ - if (SCTIME_SECS(p->ts) != det_ctx->rule_perf_last_sync) { + struct timeval ts; + gettimeofday(&ts, NULL); + if (ts.tv_sec != det_ctx->rule_perf_last_sync) { SCProfilingRuleThreatAggregate(det_ctx); - det_ctx->rule_perf_last_sync = SCTIME_SECS(p->ts); + det_ctx->rule_perf_last_sync = ts.tv_sec; } #endif diff --git a/src/detect.h b/src/detect.h index 04dd49a65a75..f147d0fda8e7 100644 --- a/src/detect.h +++ b/src/detect.h @@ -49,10 +49,16 @@ * classtype. */ #define DETECT_DEFAULT_PRIO 3 +// tx_id value to use when there is no transaction +#define PACKET_ALERT_NOTX UINT64_MAX + /* forward declarations for the structures from detect-engine-sigorder.h */ struct SCSigOrderFunc_; struct SCSigSignatureWrapper_; +/* Forward declarations for structures from Rust. */ +typedef struct SCDetectRequiresStatus SCDetectRequiresStatus; + enum SignatureType { SIG_TYPE_NOT_SET = 0, SIG_TYPE_IPONLY, // rule is handled by IPONLY engine @@ -223,6 +229,7 @@ typedef struct DetectPort_ { struct DetectPort_ *prev; struct DetectPort_ *next; + struct DetectPort_ *last; /* Pointer to the last node in the list */ } DetectPort; /* Signature flags */ @@ -233,7 +240,8 @@ typedef struct DetectPort_ { #define SIG_FLAG_SP_ANY BIT_U32(2) /**< source port is any */ #define SIG_FLAG_DP_ANY BIT_U32(3) /**< destination port is any */ -#define SIG_FLAG_NOALERT BIT_U32(4) /**< no alert flag is set */ +// vacancy + #define SIG_FLAG_DSIZE BIT_U32(5) /**< signature has a dsize setting */ #define SIG_FLAG_APPLAYER BIT_U32(6) /**< signature applies to app layer instead of packets */ @@ -268,6 +276,8 @@ typedef struct DetectPort_ { /** Info for Source and Target identification */ #define SIG_FLAG_DEST_IS_TARGET BIT_U32(26) +#define SIG_FLAG_JA4 BIT_U32(27) /**< signature uses JA4 */ + #define SIG_FLAG_HAS_TARGET (SIG_FLAG_DEST_IS_TARGET|SIG_FLAG_SRC_IS_TARGET) /* signature init flags */ @@ -282,7 +292,7 @@ typedef struct DetectPort_ { #define SIG_FLAG_INIT_PRIO_EXPLICIT \ 
BIT_U32(8) /**< priority is explicitly set by the priority keyword */ #define SIG_FLAG_INIT_FILEDATA BIT_U32(9) /**< signature has filedata keyword */ -#define SIG_FLAG_INIT_JA3 BIT_U32(10) /**< signature has ja3 keyword */ +#define SIG_FLAG_INIT_JA BIT_U32(10) /**< signature has ja3/ja4 keyword */ #define SIG_FLAG_INIT_OVERFLOW BIT_U32(11) /**< signature has overflown buffers */ /* signature mask flags */ @@ -794,6 +804,7 @@ typedef struct SigFileLoaderStat_ { int total_files; int good_sigs_total; int bad_sigs_total; + int skipped_sigs_total; } SigFileLoaderStat; typedef struct DetectEngineThreadKeywordCtxItem_ { @@ -922,6 +933,9 @@ typedef struct DetectEngineCtx_ { bool sigerror_silent; bool sigerror_ok; + /** The rule errored out due to missing requirements. */ + bool sigerror_requires; + bool filedata_config_initialized; /* specify the configuration for mpm context factory */ @@ -1029,6 +1043,12 @@ typedef struct DetectEngineCtx_ { /* path to the tenant yaml for this engine */ char *tenant_path; + + /* Track rule requirements for reporting after loading rules. 
*/ + SCDetectRequiresStatus *requirements; + + /* number of signatures using filestore, limited as u16 */ + uint16_t filestore_cnt; } DetectEngineCtx; /* Engine groups profiles (low, medium, high, custom) */ diff --git a/src/feature.h b/src/feature.h index 6549c5bbeab5..a1420f4a73ab 100644 --- a/src/feature.h +++ b/src/feature.h @@ -26,6 +26,8 @@ /* Provided feature names */ #define FEATURE_OUTPUT_FILESTORE "output::file-store" +#define FEATURE_JA3 "ja3" +#define FEATURE_JA4 "ja4" void ProvidesFeature(const char *); bool RequiresFeature(const char *); diff --git a/src/flow-bypass.c b/src/flow-bypass.c index 8dbb5ab17d74..10ecf91f494e 100644 --- a/src/flow-bypass.c +++ b/src/flow-bypass.c @@ -93,7 +93,14 @@ static TmEcode BypassedFlowManager(ThreadVars *th_v, void *thread_data) if (!found) return TM_ECODE_OK; + TmThreadsSetFlag(th_v, THV_RUNNING); + while (1) { + if (TmThreadsCheckFlag(th_v, THV_PAUSE)) { + TmThreadsSetFlag(th_v, THV_PAUSED); + TmThreadTestThreadUnPaused(th_v); + TmThreadsUnsetFlag(th_v, THV_PAUSED); + } SCLogDebug("Dumping the table"); gettimeofday(&tv, NULL); TIMEVAL_TO_TIMESPEC(&tv, &curtime); diff --git a/src/flow-timeout.c b/src/flow-timeout.c index 6a9b707c2186..6efa3827a72f 100644 --- a/src/flow-timeout.c +++ b/src/flow-timeout.c @@ -213,7 +213,7 @@ static inline Packet *FlowForceReassemblyPseudoPacketSetup(Packet *p, } p->tcph->th_offx2 = 0x50; - p->tcph->th_flags |= TH_ACK; + p->tcph->th_flags = 0; p->tcph->th_win = 10; p->tcph->th_urp = 0; @@ -341,14 +341,20 @@ int FlowForceReassemblyNeedReassembly(Flow *f) * * The function requires flow to be locked beforehand. * + * Normally, the first thread_id value should be used. This is when the flow is + * created on seeing the first packet to the server; when the flow's reversed + * flag is set, choose the second thread_id (to client/source). + * * \param f Pointer to the flow. * * \retval 0 This flow doesn't need any reassembly processing; 1 otherwise. 
*/ void FlowForceReassemblyForFlow(Flow *f) { - const int thread_id = (int)f->thread_id[0]; - TmThreadsInjectFlowById(f, thread_id); + // Choose the thread_id based on whether the flow has been + // reversed. + int idx = f->flags & FLOW_DIR_REVERSED ? 1 : 0; + TmThreadsInjectFlowById(f, (const int)f->thread_id[idx]); } /** diff --git a/src/flow-worker.c b/src/flow-worker.c index a20e053c59c9..32fbe09381d3 100644 --- a/src/flow-worker.c +++ b/src/flow-worker.c @@ -391,8 +391,16 @@ static inline void FlowWorkerStreamTCPUpdate(ThreadVars *tv, FlowWorkerThreadDat StreamTcp(tv, p, fw->stream_thread, &fw->pq); FLOWWORKER_PROFILING_END(p, PROFILE_FLOWWORKER_STREAM); - if (FlowChangeProto(p->flow)) { + // this is the first packet that sets no payload inspection + bool setting_nopayload = + p->flow->alparser && + AppLayerParserStateIssetFlag(p->flow->alparser, APP_LAYER_PARSER_NO_INSPECTION) && + !(p->flags & PKT_NOPAYLOAD_INSPECTION); + if (FlowChangeProto(p->flow) || setting_nopayload) { StreamTcpDetectLogFlush(tv, fw->stream_thread, p->flow, p, &fw->pq); + if (setting_nopayload) { + FlowSetNoPayloadInspectionFlag(p->flow); + } AppLayerParserStateSetFlag(p->flow->alparser, APP_LAYER_PARSER_EOF_TS); AppLayerParserStateSetFlag(p->flow->alparser, APP_LAYER_PARSER_EOF_TC); } @@ -430,6 +438,10 @@ static inline void FlowWorkerStreamTCPUpdate(ThreadVars *tv, FlowWorkerThreadDat TmqhOutputPacketpool(tv, x); } } + if (FlowChangeProto(p->flow) && p->flow->flags & FLOW_ACTION_DROP) { + // in case f->flags & FLOW_ACTION_DROP was set by one of the dequeued packets + PacketDrop(p, ACTION_DROP, PKT_DROP_REASON_FLOW_DROP); + } } static void FlowWorkerFlowTimeout(ThreadVars *tv, Packet *p, FlowWorkerThreadData *fw, diff --git a/src/flow.c b/src/flow.c index 9783b7883b0b..9e910c4f05f9 100644 --- a/src/flow.c +++ b/src/flow.c @@ -291,6 +291,8 @@ void FlowSwap(Flow *f) FlowSwapFlags(f); FlowSwapFileFlags(f); + SWAP_VARS(FlowThreadId, f->thread_id[0], f->thread_id[1]); + if (f->proto == 
IPPROTO_TCP) { TcpStreamFlowSwap(f); } @@ -505,7 +507,13 @@ void FlowHandlePacketUpdate(Flow *f, Packet *p, ThreadVars *tv, DecodeThreadVars SCLogDebug("pkt %p FLOW_PKT_ESTABLISHED", p); p->flowflags |= FLOW_PKT_ESTABLISHED; - FlowUpdateState(f, FLOW_STATE_ESTABLISHED); + if ( +#ifdef CAPTURE_OFFLOAD + (f->flow_state != FLOW_STATE_CAPTURE_BYPASSED) && +#endif + (f->flow_state != FLOW_STATE_LOCAL_BYPASSED)) { + FlowUpdateState(f, FLOW_STATE_ESTABLISHED); + } } if (f->flags & FLOW_ACTION_DROP) { diff --git a/src/flow.h b/src/flow.h index 0a730e0ea3b8..9866b568560b 100644 --- a/src/flow.h +++ b/src/flow.h @@ -142,8 +142,9 @@ typedef struct AppLayerParserState_ AppLayerParserState; #define FLOWFILE_NO_SIZE_TS BIT_U16(10) #define FLOWFILE_NO_SIZE_TC BIT_U16(11) -/** store all files in the flow */ -#define FLOWFILE_STORE BIT_U16(12) +/** store files in the flow */ +#define FLOWFILE_STORE_TS BIT_U16(12) +#define FLOWFILE_STORE_TC BIT_U16(13) #define FLOWFILE_NONE_TS (FLOWFILE_NO_MAGIC_TS | \ FLOWFILE_NO_STORE_TS | \ diff --git a/src/interval-tree.h b/src/interval-tree.h new file mode 100644 index 000000000000..934083447982 --- /dev/null +++ b/src/interval-tree.h @@ -0,0 +1,563 @@ +/* $NetBSD: interval-tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */ +/* $OpenBSD: interval-tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */ +/* $FreeBSD$ */ + +/* This is a COPY of the in-tree tree.h modified to accomodate interval + * tree operations */ + +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright 2002 Niels Provos + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SYS_INTERVALTREE_H_ +#define _SYS_INTERVALTREE_H_ + +#if defined(__clang_analyzer__) +#define _T_ASSERT(a) assert((a)) +#else +#define _T_ASSERT(a) +#endif + +/* + * This file defines data structures for interval trees which are + * implemented using red-black trees. + * + * A red-black tree is a binary search tree with the node color as an + * extra attribute. It fulfills a set of conditions: + * - every search path from the root to a leaf consists of the + * same number of black nodes, + * - each red node (except for the root) has a black parent, + * - each leaf node is black. + * + * Every operation on a red-black tree is bounded as O(lg n). + * The maximum height of a red-black tree is 2lg (n+1). 
+ */ + +/* Macros that define a red-black tree */ +#define IRB_HEAD(name, type) \ + struct name { \ + struct type *rbh_root; /* root of the tree */ \ + } + +#define IRB_INITIALIZER(root) \ + { \ + NULL \ + } + +#define IRB_INIT(root) \ + do { \ + (root)->rbh_root = NULL; \ + } while (/*CONSTCOND*/ 0) + +#define IRB_BLACK 0 +#define IRB_RED 1 +#define IRB_ENTRY(type) \ + struct { \ + struct type *rbe_left; /* left element */ \ + struct type *rbe_right; /* right element */ \ + struct type *rbe_parent; /* parent element */ \ + int rbe_color; /* node color */ \ + } + +#define IRB_LEFT(elm, field) (elm)->field.rbe_left +#define IRB_RIGHT(elm, field) (elm)->field.rbe_right +#define IRB_PARENT(elm, field) (elm)->field.rbe_parent +#define IRB_COLOR(elm, field) (elm)->field.rbe_color +#define IRB_ROOT(head) (head)->rbh_root +#define IRB_EMPTY(head) (IRB_ROOT(head) == NULL) + +#define IRB_SET(elm, parent, field) \ + do { \ + IRB_PARENT(elm, field) = parent; \ + IRB_LEFT(elm, field) = IRB_RIGHT(elm, field) = NULL; \ + IRB_COLOR(elm, field) = IRB_RED; \ + } while (/*CONSTCOND*/ 0) + +#define IRB_SET_BLACKRED(black, red, field) \ + do { \ + IRB_COLOR(black, field) = IRB_BLACK; \ + IRB_COLOR(red, field) = IRB_RED; \ + } while (/*CONSTCOND*/ 0) + +/* + * The implementation of the following macro has been updated. 
+ * In order to incorporte it properly, the call sites of this + * function have also been updated compared to the standard + * Red Black tree implementation in tree.h of BSD */ +#ifndef IRB_AUGMENT +#define IRB_AUGMENT(x, field) \ + do { \ + if (x != NULL) { \ + x->max = x->port2; \ + if (IRB_LEFT(x, field) != NULL) { \ + x->max = MAX(x->max, IRB_LEFT(x, field)->max); \ + } \ + if (IRB_RIGHT(x, field) != NULL) { \ + x->max = MAX(x->max, IRB_RIGHT(x, field)->max); \ + } \ + } \ + } while (0) +#endif + +#define IRB_ROTATE_LEFT(head, elm, tmp, field) \ + do { \ + (tmp) = IRB_RIGHT(elm, field); \ + if ((IRB_RIGHT(elm, field) = IRB_LEFT(tmp, field)) != NULL) { \ + IRB_PARENT(IRB_LEFT(tmp, field), field) = (elm); \ + } \ + if ((IRB_PARENT(tmp, field) = IRB_PARENT(elm, field)) != NULL) { \ + if ((elm) == IRB_LEFT(IRB_PARENT(elm, field), field)) \ + IRB_LEFT(IRB_PARENT(elm, field), field) = (tmp); \ + else \ + IRB_RIGHT(IRB_PARENT(elm, field), field) = (tmp); \ + } else \ + (head)->rbh_root = (tmp); \ + IRB_LEFT(tmp, field) = (elm); \ + IRB_PARENT(elm, field) = (tmp); \ + IRB_AUGMENT(elm, field); \ + IRB_AUGMENT(tmp, field); \ + if ((IRB_PARENT(tmp, field))) \ + IRB_AUGMENT(IRB_PARENT(tmp, field), field); \ + } while (/*CONSTCOND*/ 0) + +#define IRB_ROTATE_RIGHT(head, elm, tmp, field) \ + do { \ + (tmp) = IRB_LEFT(elm, field); \ + if ((IRB_LEFT(elm, field) = IRB_RIGHT(tmp, field)) != NULL) { \ + IRB_PARENT(IRB_RIGHT(tmp, field), field) = (elm); \ + } \ + if ((IRB_PARENT(tmp, field) = IRB_PARENT(elm, field)) != NULL) { \ + if ((elm) == IRB_LEFT(IRB_PARENT(elm, field), field)) \ + IRB_LEFT(IRB_PARENT(elm, field), field) = (tmp); \ + else \ + IRB_RIGHT(IRB_PARENT(elm, field), field) = (tmp); \ + } else \ + (head)->rbh_root = (tmp); \ + IRB_RIGHT(tmp, field) = (elm); \ + IRB_PARENT(elm, field) = (tmp); \ + IRB_AUGMENT(elm, field); \ + IRB_AUGMENT(tmp, field); \ + if ((IRB_PARENT(tmp, field))) \ + IRB_AUGMENT(IRB_PARENT(tmp, field), field); \ + } while (/*CONSTCOND*/ 0) + +/* 
Generates prototypes and inline functions */ +#define IRB_PROTOTYPE(name, type, field, cmp) IRB_PROTOTYPE_INTERNAL(name, type, field, cmp, ) +#define IRB_PROTOTYPE_STATIC(name, type, field, cmp) \ + IRB_PROTOTYPE_INTERNAL(name, type, field, cmp, __unused static) +#define IRB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \ + IRB_PROTOTYPE_INSERT_COLOR(name, type, attr); \ + IRB_PROTOTYPE_REMOVE_COLOR(name, type, attr); \ + IRB_PROTOTYPE_INSERT(name, type, attr); \ + IRB_PROTOTYPE_REMOVE(name, type, attr); \ + IRB_PROTOTYPE_FIND(name, type, attr); \ + IRB_PROTOTYPE_NFIND(name, type, attr); \ + IRB_PROTOTYPE_NEXT(name, type, attr); \ + IRB_PROTOTYPE_PREV(name, type, attr); \ + IRB_PROTOTYPE_MINMAX(name, type, attr); +#define IRB_PROTOTYPE_INSERT_COLOR(name, type, attr) \ + attr void name##_IRB_INSERT_COLOR(struct name *, struct type *) +#define IRB_PROTOTYPE_REMOVE_COLOR(name, type, attr) \ + attr void name##_IRB_REMOVE_COLOR(struct name *, struct type *, struct type *) +#define IRB_PROTOTYPE_REMOVE(name, type, attr) \ + attr struct type *name##_IRB_REMOVE(struct name *, struct type *) +#define IRB_PROTOTYPE_INSERT(name, type, attr) \ + attr struct type *name##_IRB_INSERT(struct name *, struct type *) +#define IRB_PROTOTYPE_FIND(name, type, attr) \ + attr struct type *name##_IRB_FIND(struct name *, struct type *) +#define IRB_PROTOTYPE_NFIND(name, type, attr) \ + attr struct type *name##_IRB_NFIND(struct name *, struct type *) +#define IRB_PROTOTYPE_NEXT(name, type, attr) attr struct type *name##_IRB_NEXT(struct type *) +#define IRB_PROTOTYPE_PREV(name, type, attr) attr struct type *name##_IRB_PREV(struct type *) +#define IRB_PROTOTYPE_MINMAX(name, type, attr) \ + attr struct type *name##_IRB_MINMAX(struct name *, int) + +/* Main rb operation. 
+ * Moves node close to the key of elm to top + */ +#define IRB_GENERATE(name, type, field, cmp) IRB_GENERATE_INTERNAL(name, type, field, cmp, ) +#define IRB_GENERATE_STATIC(name, type, field, cmp) \ + IRB_GENERATE_INTERNAL(name, type, field, cmp, __unused static) +#define IRB_GENERATE_INTERNAL(name, type, field, cmp, attr) \ + IRB_GENERATE_INSERT_COLOR(name, type, field, attr) \ + IRB_GENERATE_REMOVE_COLOR(name, type, field, attr) \ + IRB_GENERATE_INSERT(name, type, field, cmp, attr) \ + IRB_GENERATE_REMOVE(name, type, field, attr) \ + IRB_GENERATE_FIND(name, type, field, cmp, attr) \ + IRB_GENERATE_NFIND(name, type, field, cmp, attr) \ + IRB_GENERATE_NEXT(name, type, field, attr) \ + IRB_GENERATE_PREV(name, type, field, attr) \ + IRB_GENERATE_MINMAX(name, type, field, attr) + +#define IRB_GENERATE_INSERT_COLOR(name, type, field, attr) \ + attr void name##_IRB_INSERT_COLOR(struct name *head, struct type *elm) \ + { \ + struct type *parent, *gparent, *tmp; \ + while ((parent = IRB_PARENT(elm, field)) != NULL && IRB_COLOR(parent, field) == IRB_RED) { \ + gparent = IRB_PARENT(parent, field); \ + _T_ASSERT(gparent); \ + if (parent == IRB_LEFT(gparent, field)) { \ + tmp = IRB_RIGHT(gparent, field); \ + if (tmp && IRB_COLOR(tmp, field) == IRB_RED) { \ + IRB_COLOR(tmp, field) = IRB_BLACK; \ + IRB_SET_BLACKRED(parent, gparent, field); \ + elm = gparent; \ + continue; \ + } \ + if (IRB_RIGHT(parent, field) == elm) { \ + IRB_ROTATE_LEFT(head, parent, tmp, field); \ + tmp = parent; \ + parent = elm; \ + elm = tmp; \ + } \ + IRB_SET_BLACKRED(parent, gparent, field); \ + IRB_ROTATE_RIGHT(head, gparent, tmp, field); \ + } else { \ + tmp = IRB_LEFT(gparent, field); \ + if (tmp && IRB_COLOR(tmp, field) == IRB_RED) { \ + IRB_COLOR(tmp, field) = IRB_BLACK; \ + IRB_SET_BLACKRED(parent, gparent, field); \ + elm = gparent; \ + continue; \ + } \ + if (IRB_LEFT(parent, field) == elm) { \ + IRB_ROTATE_RIGHT(head, parent, tmp, field); \ + tmp = parent; \ + parent = elm; \ + elm = tmp; \ + 
} \ + IRB_SET_BLACKRED(parent, gparent, field); \ + IRB_ROTATE_LEFT(head, gparent, tmp, field); \ + } \ + } \ + IRB_COLOR(head->rbh_root, field) = IRB_BLACK; \ + } + +#define IRB_GENERATE_REMOVE_COLOR(name, type, field, attr) \ + attr void name##_IRB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \ + { \ + struct type *tmp; \ + while ((elm == NULL || IRB_COLOR(elm, field) == IRB_BLACK) && elm != IRB_ROOT(head)) { \ + if (IRB_LEFT(parent, field) == elm) { \ + tmp = IRB_RIGHT(parent, field); \ + if (IRB_COLOR(tmp, field) == IRB_RED) { \ + IRB_SET_BLACKRED(tmp, parent, field); \ + IRB_ROTATE_LEFT(head, parent, tmp, field); \ + tmp = IRB_RIGHT(parent, field); \ + } \ + _T_ASSERT(tmp); \ + if ((IRB_LEFT(tmp, field) == NULL || \ + IRB_COLOR(IRB_LEFT(tmp, field), field) == IRB_BLACK) && \ + (IRB_RIGHT(tmp, field) == NULL || \ + IRB_COLOR(IRB_RIGHT(tmp, field), field) == IRB_BLACK)) { \ + IRB_COLOR(tmp, field) = IRB_RED; \ + elm = parent; \ + parent = IRB_PARENT(elm, field); \ + } else { \ + if (IRB_RIGHT(tmp, field) == NULL || \ + IRB_COLOR(IRB_RIGHT(tmp, field), field) == IRB_BLACK) { \ + struct type *oleft; \ + if ((oleft = IRB_LEFT(tmp, field)) != NULL) \ + IRB_COLOR(oleft, field) = IRB_BLACK; \ + IRB_COLOR(tmp, field) = IRB_RED; \ + IRB_ROTATE_RIGHT(head, tmp, oleft, field); \ + tmp = IRB_RIGHT(parent, field); \ + } \ + IRB_COLOR(tmp, field) = IRB_COLOR(parent, field); \ + IRB_COLOR(parent, field) = IRB_BLACK; \ + if (IRB_RIGHT(tmp, field)) \ + IRB_COLOR(IRB_RIGHT(tmp, field), field) = IRB_BLACK; \ + IRB_ROTATE_LEFT(head, parent, tmp, field); \ + elm = IRB_ROOT(head); \ + break; \ + } \ + } else { \ + tmp = IRB_LEFT(parent, field); \ + if (IRB_COLOR(tmp, field) == IRB_RED) { \ + IRB_SET_BLACKRED(tmp, parent, field); \ + IRB_ROTATE_RIGHT(head, parent, tmp, field); \ + tmp = IRB_LEFT(parent, field); \ + } \ + _T_ASSERT(tmp); \ + if ((IRB_LEFT(tmp, field) == NULL || \ + IRB_COLOR(IRB_LEFT(tmp, field), field) == IRB_BLACK) && \ + (IRB_RIGHT(tmp, 
field) == NULL || \ + IRB_COLOR(IRB_RIGHT(tmp, field), field) == IRB_BLACK)) { \ + IRB_COLOR(tmp, field) = IRB_RED; \ + elm = parent; \ + parent = IRB_PARENT(elm, field); \ + } else { \ + if (IRB_LEFT(tmp, field) == NULL || \ + IRB_COLOR(IRB_LEFT(tmp, field), field) == IRB_BLACK) { \ + struct type *oright; \ + if ((oright = IRB_RIGHT(tmp, field)) != NULL) \ + IRB_COLOR(oright, field) = IRB_BLACK; \ + IRB_COLOR(tmp, field) = IRB_RED; \ + IRB_ROTATE_LEFT(head, tmp, oright, field); \ + tmp = IRB_LEFT(parent, field); \ + } \ + IRB_COLOR(tmp, field) = IRB_COLOR(parent, field); \ + IRB_COLOR(parent, field) = IRB_BLACK; \ + if (IRB_LEFT(tmp, field)) \ + IRB_COLOR(IRB_LEFT(tmp, field), field) = IRB_BLACK; \ + IRB_ROTATE_RIGHT(head, parent, tmp, field); \ + elm = IRB_ROOT(head); \ + break; \ + } \ + } \ + } \ + if (elm) \ + IRB_COLOR(elm, field) = IRB_BLACK; \ + } + +#define IRB_GENERATE_REMOVE(name, type, field, attr) \ + attr struct type *name##_IRB_REMOVE(struct name *head, struct type *elm) \ + { \ + struct type *child, *parent, *old = elm; \ + int color; \ + if (IRB_LEFT(elm, field) == NULL) \ + child = IRB_RIGHT(elm, field); \ + else if (IRB_RIGHT(elm, field) == NULL) \ + child = IRB_LEFT(elm, field); \ + else { \ + struct type *left; \ + elm = IRB_RIGHT(elm, field); \ + while ((left = IRB_LEFT(elm, field)) != NULL) \ + elm = left; \ + child = IRB_RIGHT(elm, field); \ + parent = IRB_PARENT(elm, field); \ + color = IRB_COLOR(elm, field); \ + if (child) \ + IRB_PARENT(child, field) = parent; \ + if (parent) { \ + if (IRB_LEFT(parent, field) == elm) \ + IRB_LEFT(parent, field) = child; \ + else \ + IRB_RIGHT(parent, field) = child; \ + IRB_AUGMENT(parent, field); \ + } else \ + IRB_ROOT(head) = child; \ + if (IRB_PARENT(elm, field) == old) \ + parent = elm; \ + _T_ASSERT((old)); \ + (elm)->field = (old)->field; \ + if (IRB_PARENT(old, field)) { \ + if (IRB_LEFT(IRB_PARENT(old, field), field) == old) \ + IRB_LEFT(IRB_PARENT(old, field), field) = elm; \ + else \ + 
IRB_RIGHT(IRB_PARENT(old, field), field) = elm; \ + IRB_AUGMENT(IRB_PARENT(old, field), field); \ + } else \ + IRB_ROOT(head) = elm; \ + _T_ASSERT(old); \ + _T_ASSERT(IRB_LEFT(old, field)); \ + IRB_PARENT(IRB_LEFT(old, field), field) = elm; \ + if (IRB_RIGHT(old, field)) \ + IRB_PARENT(IRB_RIGHT(old, field), field) = elm; \ + if (parent) { \ + left = parent; \ + do { \ + IRB_AUGMENT(left, field); \ + } while ((left = IRB_PARENT(left, field)) != NULL); \ + } \ + goto color; \ + } \ + parent = IRB_PARENT(elm, field); \ + color = IRB_COLOR(elm, field); \ + if (child) \ + IRB_PARENT(child, field) = parent; \ + if (parent) { \ + if (IRB_LEFT(parent, field) == elm) \ + IRB_LEFT(parent, field) = child; \ + else \ + IRB_RIGHT(parent, field) = child; \ + IRB_AUGMENT(parent, field); \ + } else \ + IRB_ROOT(head) = child; \ + color: \ + if (color == IRB_BLACK) \ + name##_IRB_REMOVE_COLOR(head, parent, child); \ + return (old); \ + } + +#define IRB_GENERATE_INSERT(name, type, field, cmp, attr) \ + /* Inserts a node into the IRB tree */ \ + attr struct type *name##_IRB_INSERT(struct name *head, struct type *elm) \ + { \ + struct type *tmp; \ + struct type *parent = NULL; \ + int comp = 0; \ + tmp = IRB_ROOT(head); \ + while (tmp) { \ + parent = tmp; \ + comp = (cmp)(elm, parent); \ + if (comp < 0) { \ + tmp = IRB_LEFT(tmp, field); \ + } else if (comp > 0) { \ + tmp = IRB_RIGHT(tmp, field); \ + } else \ + return (tmp); \ + } \ + IRB_SET(elm, parent, field); \ + if (parent != NULL) { \ + if (comp < 0) \ + IRB_LEFT(parent, field) = elm; \ + else \ + IRB_RIGHT(parent, field) = elm; \ + } else \ + IRB_ROOT(head) = elm; \ + IRB_AUGMENT(elm, field); \ + name##_IRB_INSERT_COLOR(head, elm); \ + return (NULL); \ + } + +#define IRB_GENERATE_FIND(name, type, field, cmp, attr) \ + /* Finds the node with the same key as elm */ \ + attr struct type *name##_IRB_FIND(struct name *head, struct type *elm) \ + { \ + struct type *tmp = IRB_ROOT(head); \ + int comp; \ + while (tmp) { \ + comp = 
cmp(elm, tmp); \ + if (comp < 0) \ + tmp = IRB_LEFT(tmp, field); \ + else if (comp > 0) \ + tmp = IRB_RIGHT(tmp, field); \ + else \ + return (tmp); \ + } \ + return (NULL); \ + } + +#define IRB_GENERATE_NFIND(name, type, field, cmp, attr) \ + /* Finds the first node greater than or equal to the search key */ \ + attr struct type *name##_IRB_NFIND(struct name *head, struct type *elm) \ + { \ + struct type *tmp = IRB_ROOT(head); \ + struct type *res = NULL; \ + int comp; \ + while (tmp) { \ + comp = cmp(elm, tmp); \ + if (comp < 0) { \ + res = tmp; \ + tmp = IRB_LEFT(tmp, field); \ + } else if (comp > 0) \ + tmp = IRB_RIGHT(tmp, field); \ + else \ + return (tmp); \ + } \ + return (res); \ + } + +#define IRB_GENERATE_NEXT(name, type, field, attr) \ + /* ARGSUSED */ \ + attr struct type *name##_IRB_NEXT(struct type *elm) \ + { \ + if (IRB_RIGHT(elm, field)) { \ + elm = IRB_RIGHT(elm, field); \ + while (IRB_LEFT(elm, field)) \ + elm = IRB_LEFT(elm, field); \ + } else { \ + if (IRB_PARENT(elm, field) && (elm == IRB_LEFT(IRB_PARENT(elm, field), field))) \ + elm = IRB_PARENT(elm, field); \ + else { \ + while (IRB_PARENT(elm, field) && \ + (elm == IRB_RIGHT(IRB_PARENT(elm, field), field))) \ + elm = IRB_PARENT(elm, field); \ + elm = IRB_PARENT(elm, field); \ + } \ + } \ + return (elm); \ + } + +#define IRB_GENERATE_PREV(name, type, field, attr) \ + /* ARGSUSED */ \ + attr struct type *name##_IRB_PREV(struct type *elm) \ + { \ + if (IRB_LEFT(elm, field)) { \ + elm = IRB_LEFT(elm, field); \ + while (IRB_RIGHT(elm, field)) \ + elm = IRB_RIGHT(elm, field); \ + } else { \ + if (IRB_PARENT(elm, field) && (elm == IRB_RIGHT(IRB_PARENT(elm, field), field))) \ + elm = IRB_PARENT(elm, field); \ + else { \ + while (IRB_PARENT(elm, field) && (elm == IRB_LEFT(IRB_PARENT(elm, field), field))) \ + elm = IRB_PARENT(elm, field); \ + elm = IRB_PARENT(elm, field); \ + } \ + } \ + return (elm); \ + } + +#define IRB_GENERATE_MINMAX(name, type, field, attr) \ + attr struct type 
*name##_IRB_MINMAX(struct name *head, int val) \ + { \ + struct type *tmp = IRB_ROOT(head); \ + struct type *parent = NULL; \ + while (tmp) { \ + parent = tmp; \ + if (val < 0) \ + tmp = IRB_LEFT(tmp, field); \ + else \ + tmp = IRB_RIGHT(tmp, field); \ + } \ + return (parent); \ + } + +#define IRB_NEGINF -1 +#define IRB_INF 1 + +#define IRB_INSERT(name, x, y) name##_IRB_INSERT(x, y) +#define IRB_REMOVE(name, x, y) name##_IRB_REMOVE(x, y) +#define IRB_FIND(name, x, y) name##_IRB_FIND(x, y) +#define IRB_NFIND(name, x, y) name##_IRB_NFIND(x, y) +#define IRB_NEXT(name, x, y) name##_IRB_NEXT(y) +#define IRB_PREV(name, x, y) name##_IRB_PREV(y) +#define IRB_MIN(name, x) name##_IRB_MINMAX(x, IRB_NEGINF) +#define IRB_MAX(name, x) name##_IRB_MINMAX(x, IRB_INF) + +#define IRB_FOREACH(x, name, head) \ + for ((x) = IRB_MIN(name, head); (x) != NULL; (x) = name##_IRB_NEXT(x)) + +#define IRB_FOREACH_FROM(x, name, y) \ + for ((x) = (y); ((x) != NULL) && ((y) = name##_IRB_NEXT(x), (x) != NULL); (x) = (y)) + +#define IRB_FOREACH_SAFE(x, name, head, y) \ + for ((x) = IRB_MIN(name, head); ((x) != NULL) && ((y) = name##_IRB_NEXT(x), (x) != NULL); \ + (x) = (y)) + +#define IRB_FOREACH_REVERSE(x, name, head) \ + for ((x) = IRB_MAX(name, head); (x) != NULL; (x) = name##_IRB_PREV(x)) + +#define IRB_FOREACH_REVERSE_FROM(x, name, y) \ + for ((x) = (y); ((x) != NULL) && ((y) = name##_IRB_PREV(x), (x) != NULL); (x) = (y)) + +#define IRB_FOREACH_REVERSE_SAFE(x, name, head, y) \ + for ((x) = IRB_MAX(name, head); ((x) != NULL) && ((y) = name##_IRB_PREV(x), (x) != NULL); \ + (x) = (y)) + +#endif /* _SYS_INTERVALTREE_H_ */ diff --git a/src/log-pcap.c b/src/log-pcap.c index 1e1b6da1fb55..d997471e36fc 100644 --- a/src/log-pcap.c +++ b/src/log-pcap.c @@ -86,6 +86,7 @@ typedef enum LogModeConditionalType_ { #define PCAP_SNAPLEN 262144 #define PCAP_BUFFER_TIMEOUT 1000000 // microseconds +#define PCAP_PKTHDR_SIZE 16 SC_ATOMIC_DECLARE(uint32_t, thread_cnt); @@ -621,11 +622,11 @@ static int PcapLog 
(ThreadVars *t, void *thread_data, const Packet *p) rp = p->root; pl->h->caplen = GET_PKT_LEN(rp); pl->h->len = GET_PKT_LEN(rp); - len = sizeof(*pl->h) + GET_PKT_LEN(rp); + len = PCAP_PKTHDR_SIZE + GET_PKT_LEN(rp); } else { pl->h->caplen = GET_PKT_LEN(p); pl->h->len = GET_PKT_LEN(p); - len = sizeof(*pl->h) + GET_PKT_LEN(p); + len = PCAP_PKTHDR_SIZE + GET_PKT_LEN(p); } if (pl->filename == NULL) { @@ -708,11 +709,11 @@ static int PcapLog (ThreadVars *t, void *thread_data, const Packet *p) rp = p->root; pl->h->caplen = GET_PKT_LEN(rp); pl->h->len = GET_PKT_LEN(rp); - len = sizeof(*pl->h) + GET_PKT_LEN(rp); + len = PCAP_PKTHDR_SIZE + GET_PKT_LEN(rp); } else { pl->h->caplen = GET_PKT_LEN(p); pl->h->len = GET_PKT_LEN(p); - len = sizeof(*pl->h) + GET_PKT_LEN(p); + len = PCAP_PKTHDR_SIZE + GET_PKT_LEN(p); } } } @@ -1437,6 +1438,8 @@ static OutputInitResult PcapLogInitCtx(ConfNode *conf) if (s_mode != NULL) { if (strcasecmp(s_mode, "sguil") == 0) { pl->mode = LOGMODE_SGUIL; + SCLogWarning("sguil mode is deprecated and will be removed from Suricata 8; see " + "issue 6688"); } else if (strcasecmp(s_mode, "multi") == 0) { pl->mode = LOGMODE_MULTI; } else if (strcasecmp(s_mode, "normal") != 0) { diff --git a/src/output-filestore.c b/src/output-filestore.c index dcf4c1aea502..d23560c31e44 100644 --- a/src/output-filestore.c +++ b/src/output-filestore.c @@ -194,12 +194,8 @@ static int OutputFilestoreLogger(ThreadVars *tv, void *thread_data, const Packet SCLogDebug("ff %p, data %p, data_len %u", ff, data, data_len); - char base_filename[PATH_MAX] = ""; - snprintf(base_filename, sizeof(base_filename), "%s/file.%u", - ctx->tmpdir, ff->file_store_id); - snprintf(filename, sizeof(filename), "%s", base_filename); - if (flags & OUTPUT_FILEDATA_FLAG_OPEN) { + snprintf(filename, sizeof(filename), "%s/file.%u", ctx->tmpdir, ff->file_store_id); file_fd = open(filename, O_CREAT | O_TRUNC | O_NOFOLLOW | O_WRONLY, 0644); if (file_fd == -1) { @@ -220,6 +216,7 @@ static int 
OutputFilestoreLogger(ThreadVars *tv, void *thread_data, const Packet /* we can get called with a NULL ffd when we need to close */ } else if (data != NULL) { if (ff->fd == -1) { + snprintf(filename, sizeof(filename), "%s/file.%u", ctx->tmpdir, ff->file_store_id); file_fd = open(filename, O_APPEND | O_NOFOLLOW | O_WRONLY); if (file_fd == -1) { StatsIncr(tv, aft->fs_error_counter); @@ -235,6 +232,7 @@ static int OutputFilestoreLogger(ThreadVars *tv, void *thread_data, const Packet if (file_fd != -1) { ssize_t r = write(file_fd, (const void *)data, (size_t)data_len); if (r == -1) { + snprintf(filename, sizeof(filename), "%s/file.%u", ctx->tmpdir, ff->file_store_id); StatsIncr(tv, aft->fs_error_counter); WARN_ONCE(WOT_WRITE, "Filestore (v2) failed to write to %s: %s", filename, strerror(errno)); diff --git a/src/output-json-alert.c b/src/output-json-alert.c index a7df1065509e..072e54b61638 100644 --- a/src/output-json-alert.c +++ b/src/output-json-alert.c @@ -137,13 +137,13 @@ static int AlertJsonDumpStreamSegmentCallback( return 1; } -static void AlertJsonTls(const Flow *f, JsonBuilder *js) +static void AlertJsonTls(const Flow *f, const uint32_t sig_flags, JsonBuilder *js) { SSLState *ssl_state = (SSLState *)FlowGetAppState(f); if (ssl_state) { jb_open_object(js, "tls"); - JsonTlsLogJSONExtended(js, ssl_state); + JsonTlsLogJSONExtended(js, ssl_state, sig_flags & SIG_FLAG_JA4); jb_close(js); } @@ -467,8 +467,8 @@ static void AlertAddPayload(AlertJsonOutputCtx *json_output_ctx, JsonBuilder *js } } -static void AlertAddAppLayer(const Packet *p, JsonBuilder *jb, - const uint64_t tx_id, const uint16_t option_flags) +static void AlertAddAppLayer(const Packet *p, JsonBuilder *jb, const uint64_t tx_id, + const uint32_t sig_flags, const uint16_t option_flags) { const AppProto proto = FlowGetAppProtocol(p->flow); JsonBuilderMark mark = { 0, 0, 0 }; @@ -487,7 +487,7 @@ static void AlertAddAppLayer(const Packet *p, JsonBuilder *jb, jb_close(jb); break; case ALPROTO_TLS: - 
AlertJsonTls(p->flow, jb); + AlertJsonTls(p->flow, sig_flags, jb); break; case ALPROTO_SSH: AlertJsonSsh(p->flow, jb); @@ -573,7 +573,7 @@ static void AlertAddAppLayer(const Packet *p, JsonBuilder *jb, break; case ALPROTO_QUIC: jb_get_mark(jb, &mark); - if (!JsonQuicAddMetadata(p->flow, tx_id, jb)) { + if (!JsonQuicAddMetadata(p->flow, sig_flags, tx_id, jb)) { jb_restore_mark(jb, &mark); } break; @@ -782,12 +782,14 @@ static int AlertJson(ThreadVars *tv, JsonAlertLogThread *aft, const Packet *p) } if (p->flow != NULL) { - if (json_output_ctx->flags & LOG_JSON_APP_LAYER) { - AlertAddAppLayer(p, jb, pa->tx_id, json_output_ctx->flags); - } - /* including fileinfo data is configured by the metadata setting */ - if (json_output_ctx->flags & LOG_JSON_RULE_METADATA) { - AlertAddFiles(p, jb, pa->tx_id); + if (pa->flags & PACKET_ALERT_FLAG_TX) { + if (json_output_ctx->flags & LOG_JSON_APP_LAYER) { + AlertAddAppLayer(p, jb, pa->tx_id, pa->s->flags, json_output_ctx->flags); + } + /* including fileinfo data is configured by the metadata setting */ + if (json_output_ctx->flags & LOG_JSON_RULE_METADATA) { + AlertAddFiles(p, jb, pa->tx_id); + } } EveAddAppProto(p->flow, jb); diff --git a/src/output-json-email-common.c b/src/output-json-email-common.c index 31d855758579..c96188e48780 100644 --- a/src/output-json-email-common.c +++ b/src/output-json-email-common.c @@ -97,27 +97,49 @@ static inline char *SkipWhiteSpaceTill(char *p, char *savep) static bool EveEmailJsonArrayFromCommaList(JsonBuilder *js, const uint8_t *val, size_t len) { - char *savep = NULL; - char *p; - char *sp; - char *to_line = BytesToString((uint8_t *)val, len); - if (likely(to_line != NULL)) { - p = strtok_r(to_line, ",", &savep); - if (p == NULL) { - SCFree(to_line); - return false; - } - sp = SkipWhiteSpaceTill(p, savep); - jb_append_string(js, sp); - while ((p = strtok_r(NULL, ",", &savep)) != NULL) { - sp = SkipWhiteSpaceTill(p, savep); - jb_append_string(js, sp); + bool has_not_empty_field = false; + 
size_t start = 0; + int state = 0; + + for (size_t i = 0; i < len; i++) { + switch (state) { + case 0: + if (val[i] == ' ' || val[i] == '\t') { + // skip leading space + start += 1; + } else if (val[i] == '"') { + // quoted state + state = 2; + } else { + // field + state = 1; + } + break; + case 1: // field + if (val[i] == ',') { + if (i > start) { + jb_append_string_from_bytes(js, val + start, i - start); + has_not_empty_field = true; + } + start = i + 1; + state = 0; + } else if (val[i] == '"') { + // quoted + state = 2; + } + break; + case 2: // quoted + if (val[i] == '"') { + // out of quotes, back to field + state = 1; + } } - } else { - return false; } - SCFree(to_line); - return true; + if (len > start) { + jb_append_string_from_bytes(js, val + start, len - start); + has_not_empty_field = true; + } + return has_not_empty_field; } static void EveEmailLogJSONMd5(OutputJsonEmailCtx *email_ctx, JsonBuilder *js, SMTPTransaction *tx) @@ -149,9 +171,7 @@ static int JsonEmailAddToJsonArray(const uint8_t *val, size_t len, void *data) if (ajs == NULL) return 0; - char *value = BytesToString((uint8_t *)val, len); - jb_append_string(ajs, value); - SCFree(value); + jb_append_string_from_bytes(ajs, val, (uint32_t)len); return 1; } @@ -193,12 +213,8 @@ static void EveEmailLogJSONCustom(OutputJsonEmailCtx *email_ctx, JsonBuilder *js } else { field = MimeDecFindField(entity, email_fields[f].email_field); if (field != NULL) { - char *s = BytesToString((uint8_t *)field->value, - (size_t)field->value_len); - if (likely(s != NULL)) { - jb_set_string(js, email_fields[f].config_field, s); - SCFree(s); - } + jb_set_string_from_bytes( + js, email_fields[f].config_field, field->value, field->value_len); } } @@ -295,19 +311,14 @@ static bool EveEmailLogJsonData(const Flow *f, void *state, void *vtx, uint64_t bool has_ipv4_url = false; bool has_exe_url = false; for (url = entity->url_list; url != NULL; url = url->next) { - char *s = BytesToString((uint8_t *)url->url, - 
(size_t)url->url_len); - if (s != NULL) { - jb_append_string(js_url, s); - if (url->url_flags & URL_IS_EXE) - has_exe_url = true; - if (url->url_flags & URL_IS_IP6) - has_ipv6_url = true; - if (url->url_flags & URL_IS_IP4) - has_ipv6_url = true; - SCFree(s); - url_cnt += 1; - } + jb_append_string_from_bytes(js_url, url->url, url->url_len); + if (url->url_flags & URL_IS_EXE) + has_exe_url = true; + if (url->url_flags & URL_IS_IP6) + has_ipv6_url = true; + if (url->url_flags & URL_IS_IP4) + has_ipv6_url = true; + url_cnt += 1; } jb_set_bool(sjs, "has_ipv6_url", has_ipv6_url); jb_set_bool(sjs, "has_ipv4_url", has_ipv4_url); @@ -315,23 +326,14 @@ static bool EveEmailLogJsonData(const Flow *f, void *state, void *vtx, uint64_t } for (entity = entity->child; entity != NULL; entity = entity->next) { if (entity->ctnt_flags & CTNT_IS_ATTACHMENT) { - - char *s = BytesToString((uint8_t *)entity->filename, - (size_t)entity->filename_len); - jb_append_string(js_attach, s); - SCFree(s); + jb_append_string_from_bytes(js_attach, entity->filename, entity->filename_len); attach_cnt += 1; } if (entity->url_list != NULL) { MimeDecUrl *url; for (url = entity->url_list; url != NULL; url = url->next) { - char *s = BytesToString((uint8_t *)url->url, - (size_t)url->url_len); - if (s != NULL) { - jb_append_string(js_url, s); - SCFree(s); - url_cnt += 1; - } + jb_append_string_from_bytes(js_url, url->url, url->url_len); + url_cnt += 1; } } } diff --git a/src/output-json-flow.c b/src/output-json-flow.c index 07bcd954f2f5..487185f2ed5b 100644 --- a/src/output-json-flow.c +++ b/src/output-json-flow.c @@ -281,7 +281,7 @@ static void EveFlowLogJSON(OutputJsonThreadCtx *aft, JsonBuilder *jb, Flow *f) /* Close flow. 
*/ jb_close(jb); - EveAddCommonOptions(&aft->ctx->cfg, NULL, f, jb); + EveAddCommonOptions(&aft->ctx->cfg, NULL, f, jb, LOG_DIR_FLOW); /* TCP */ if (f->proto == IPPROTO_TCP) { diff --git a/src/output-json-netflow.c b/src/output-json-netflow.c index 2ac6995cfad6..98873e5f063f 100644 --- a/src/output-json-netflow.c +++ b/src/output-json-netflow.c @@ -275,7 +275,7 @@ static int JsonNetFlowLogger(ThreadVars *tv, void *thread_data, Flow *f) if (unlikely(jb == NULL)) return TM_ECODE_OK; NetFlowLogEveToServer(jb, f); - EveAddCommonOptions(&jhl->ctx->cfg, NULL, f, jb); + EveAddCommonOptions(&jhl->ctx->cfg, NULL, f, jb, LOG_DIR_FLOW_TOSERVER); OutputJsonBuilderBuffer(jb, jhl); jb_free(jb); @@ -285,7 +285,7 @@ static int JsonNetFlowLogger(ThreadVars *tv, void *thread_data, Flow *f) if (unlikely(jb == NULL)) return TM_ECODE_OK; NetFlowLogEveToClient(jb, f); - EveAddCommonOptions(&jhl->ctx->cfg, NULL, f, jb); + EveAddCommonOptions(&jhl->ctx->cfg, NULL, f, jb, LOG_DIR_FLOW_TOCLIENT); OutputJsonBuilderBuffer(jb, jhl); jb_free(jb); } diff --git a/src/output-json-quic.c b/src/output-json-quic.c index fdf2d0f09340..3934f1895d93 100644 --- a/src/output-json-quic.c +++ b/src/output-json-quic.c @@ -35,6 +35,7 @@ #include "output.h" #include "output-json.h" #include "app-layer.h" +#include "app-layer-ssl.h" #include "app-layer-parser.h" #include "output-json-quic.h" #include "rust.h" @@ -42,6 +43,7 @@ typedef struct LogQuicFileCtx_ { LogFileCtx *file_ctx; OutputJsonCtx *eve_ctx; + bool log_ja4; } LogQuicFileCtx; typedef struct JsonQuicLogThread_ { @@ -59,7 +61,9 @@ static int JsonQuicLogger(ThreadVars *tv, void *thread_data, const Packet *p, Fl if (unlikely(js == NULL)) { return TM_ECODE_OK; } - if (!rs_quic_to_json(tx, js)) { + + LogQuicFileCtx *quic_ctx = thread->quiclog_ctx; + if (!rs_quic_to_json(tx, quic_ctx->log_ja4, js)) { jb_free(js); return TM_ECODE_FAILED; } @@ -93,6 +97,13 @@ static OutputInitResult OutputQuicLogInitSub(ConfNode *conf, OutputCtx *parent_c 
SCFree(quiclog_ctx); return result; } + + /* In 7.0.x, ja4 hash is only logged when requested */ + quiclog_ctx->log_ja4 = false; + const char *ja4 = ConfNodeLookupChildValue(conf, "ja4"); + if (ja4 && ConfValIsTrue(ja4)) { + quiclog_ctx->log_ja4 = true; + } output_ctx->data = quiclog_ctx; output_ctx->DeInit = OutputQuicLogDeInitCtxSub; @@ -140,13 +151,13 @@ static TmEcode JsonQuicLogThreadDeinit(ThreadVars *t, void *data) return TM_ECODE_OK; } -bool JsonQuicAddMetadata(const Flow *f, uint64_t tx_id, JsonBuilder *js) +bool JsonQuicAddMetadata(const Flow *f, const uint32_t sig_flags, uint64_t tx_id, JsonBuilder *js) { void *state = FlowGetAppState(f); if (state) { void *tx = AppLayerParserGetTx(f->proto, ALPROTO_QUIC, state, tx_id); if (tx) { - return rs_quic_to_json(tx, js); + return rs_quic_to_json(tx, sig_flags & SIG_FLAG_JA4, js); } } diff --git a/src/output-json-quic.h b/src/output-json-quic.h index 2448d5063a34..71b434e11402 100644 --- a/src/output-json-quic.h +++ b/src/output-json-quic.h @@ -22,7 +22,7 @@ #ifndef __OUTPUT_JSON_QUIC_H__ #define __OUTPUT_JSON_QUIC_H__ -bool JsonQuicAddMetadata(const Flow *f, uint64_t tx_id, JsonBuilder *js); +bool JsonQuicAddMetadata(const Flow *f, const uint32_t sig_flags, uint64_t tx_id, JsonBuilder *js); void JsonQuicLogRegister(void); #endif /* __OUTPUT_JSON_QUIC_H__ */ diff --git a/src/output-json-stats.c b/src/output-json-stats.c index 7bfcfc58cad2..7cc880727dce 100644 --- a/src/output-json-stats.c +++ b/src/output-json-stats.c @@ -36,6 +36,7 @@ #include "util-print.h" #include "util-time.h" #include "util-unittest.h" +#include "util-validate.h" #include "util-debug.h" #include "output.h" @@ -98,6 +99,7 @@ static json_t *EngineStats2Json(const DetectEngineCtx *de_ctx, json_integer(sig_stat->good_sigs_total)); json_object_set_new(jdata, "rules_failed", json_integer(sig_stat->bad_sigs_total)); + json_object_set_new(jdata, "rules_skipped", json_integer(sig_stat->skipped_sigs_total)); } return jdata; @@ -263,21 +265,38 @@ 
json_t *StatsToJSON(const StatsTable *st, uint8_t flags) uint32_t x; for (x = 0; x < st->ntstats; x++) { uint32_t offset = x * st->nstats; + const char *tm_name = NULL; + json_t *thread = NULL; /* for each counter */ for (u = offset; u < (offset + st->nstats); u++) { if (st->tstats[u].name == NULL) continue; + DEBUG_VALIDATE_BUG_ON(st->tstats[u].tm_name == NULL); + + if (tm_name == NULL) { + // First time we see a set tm_name. Remember it + // and allocate the stats object for this thread. + tm_name = st->tstats[u].tm_name; + thread = json_object(); + if (unlikely(thread == NULL)) { + json_decref(js_stats); + json_decref(threads); + return NULL; + } + } else { + DEBUG_VALIDATE_BUG_ON(strcmp(tm_name, st->tstats[u].tm_name) != 0); + DEBUG_VALIDATE_BUG_ON(thread == NULL); + } + json_t *js_type = NULL; const char *stat_name = st->tstats[u].short_name; if (st->tstats[u].short_name == NULL) { stat_name = st->tstats[u].name; js_type = threads; } else { - char str[256]; - snprintf(str, sizeof(str), "%s.%s", st->tstats[u].tm_name, st->tstats[u].name); - js_type = OutputStats2Json(threads, str); + js_type = OutputStats2Json(thread, st->tstats[u].name); } if (js_type != NULL) { @@ -291,6 +310,10 @@ json_t *StatsToJSON(const StatsTable *st, uint8_t flags) } } } + if (tm_name != NULL) { + DEBUG_VALIDATE_BUG_ON(thread == NULL); + json_object_set_new(threads, tm_name, thread); + } } json_object_set_new(js_stats, "threads", threads); } @@ -470,3 +493,7 @@ void JsonStatsLogRegister(void) { "eve-log.stats", OutputStatsLogInitSub, JsonStatsLogger, JsonStatsLogThreadInit, JsonStatsLogThreadDeinit, NULL); } + +#ifdef UNITTESTS +#include "tests/output-json-stats.c" +#endif diff --git a/src/output-json-stats.h b/src/output-json-stats.h index 9b96d5001298..b569e30b6429 100644 --- a/src/output-json-stats.h +++ b/src/output-json-stats.h @@ -35,4 +35,6 @@ TmEcode OutputEngineStatsReloadTime(json_t **jdata); TmEcode OutputEngineStatsRuleset(json_t **jdata); void JsonStatsLogRegister(void); 
+void OutputJsonStatsRegisterTests(void); + #endif /* __OUTPUT_JSON_COUNTERS_H__ */ diff --git a/src/output-json-tls.c b/src/output-json-tls.c index 9771f4d1cd7c..88d6bdbda0fd 100644 --- a/src/output-json-tls.c +++ b/src/output-json-tls.c @@ -46,6 +46,7 @@ #include "util-logopenfile.h" #include "util-ja3.h" +#include "util-ja4.h" #include "output-json.h" #include "output-json-tls.h" @@ -76,6 +77,7 @@ SC_ATOMIC_EXTERN(unsigned int, cert_id); #define LOG_TLS_FIELD_CLIENT (1 << 13) /**< client fields (issuer, subject, etc) */ #define LOG_TLS_FIELD_CLIENT_CERT (1 << 14) #define LOG_TLS_FIELD_CLIENT_CHAIN (1 << 15) +#define LOG_TLS_FIELD_JA4 (1 << 16) typedef struct { const char *name; @@ -90,7 +92,7 @@ TlsFields tls_fields[] = { { "version", LOG_TLS_FIELD_VERSION }, { "chain", LOG_TLS_FIELD_CHAIN }, { "session_resumed", LOG_TLS_FIELD_SESSION_RESUMED }, { "ja3", LOG_TLS_FIELD_JA3 }, { "ja3s", LOG_TLS_FIELD_JA3S }, { "client", LOG_TLS_FIELD_CLIENT }, { "client_certificate", LOG_TLS_FIELD_CLIENT_CERT }, - { "client_chain", LOG_TLS_FIELD_CLIENT_CHAIN }, { NULL, -1 } }; + { "client_chain", LOG_TLS_FIELD_CLIENT_CHAIN }, { "ja4", LOG_TLS_FIELD_JA4 }, { NULL, -1 } }; typedef struct OutputTlsCtx_ { uint32_t flags; /** Store mode */ @@ -210,6 +212,16 @@ static void JsonTlsLogJa3(JsonBuilder *js, SSLState *ssl_state) } } +static void JsonTlsLogSCJA4(JsonBuilder *js, SSLState *ssl_state) +{ + if (ssl_state->client_connp.ja4 != NULL) { + uint8_t buffer[JA4_HEX_LEN]; + /* JA4 hash has 36 characters */ + SCJA4GetHash(ssl_state->client_connp.ja4, (uint8_t(*)[JA4_HEX_LEN])buffer); + jb_set_string_from_bytes(js, "ja4", buffer, 36); + } +} + static void JsonTlsLogJa3SHash(JsonBuilder *js, SSLState *ssl_state) { if (ssl_state->server_connp.ja3_hash != NULL) { @@ -381,6 +393,10 @@ static void JsonTlsLogJSONCustom(OutputTlsCtx *tls_ctx, JsonBuilder *js, if (tls_ctx->fields & LOG_TLS_FIELD_JA3S) JsonTlsLogJa3S(js, ssl_state); + /* tls ja4 */ + if (tls_ctx->fields & LOG_TLS_FIELD_JA4) + 
JsonTlsLogSCJA4(js, ssl_state); + if (tls_ctx->fields & LOG_TLS_FIELD_CLIENT) { const bool log_cert = (tls_ctx->fields & LOG_TLS_FIELD_CLIENT_CERT) != 0; const bool log_chain = (tls_ctx->fields & LOG_TLS_FIELD_CLIENT_CHAIN) != 0; @@ -392,7 +408,7 @@ static void JsonTlsLogJSONCustom(OutputTlsCtx *tls_ctx, JsonBuilder *js, } } -void JsonTlsLogJSONExtended(JsonBuilder *tjs, SSLState * state) +void JsonTlsLogJSONExtended(JsonBuilder *tjs, SSLState *state, const bool log_ja4) { JsonTlsLogJSONBasic(tjs, state); @@ -420,6 +436,10 @@ void JsonTlsLogJSONExtended(JsonBuilder *tjs, SSLState * state) /* tls ja3s */ JsonTlsLogJa3S(tjs, state); + /* tls ja4 */ + if (log_ja4) + JsonTlsLogSCJA4(tjs, state); + if (HasClientCert(&state->client_connp)) { jb_open_object(tjs, "client"); JsonTlsLogClientCert(tjs, &state->client_connp, false, false); @@ -459,11 +479,14 @@ static int JsonTlsLogger(ThreadVars *tv, void *thread_data, const Packet *p, } /* log extended */ else if (tls_ctx->flags & LOG_TLS_EXTENDED) { - JsonTlsLogJSONExtended(js, ssl_state); + JsonTlsLogJSONExtended(js, ssl_state, tls_ctx->fields & LOG_TLS_FIELD_JA4); } /* log basic */ else { JsonTlsLogJSONBasic(js, ssl_state); + /* add ja4 hash */ + if (tls_ctx->fields & LOG_TLS_FIELD_JA4) + JsonTlsLogSCJA4(js, ssl_state); } /* print original application level protocol when it have been changed @@ -566,6 +589,12 @@ static OutputTlsCtx *OutputTlsInitCtx(ConfNode *conf) } } + /* In 7.0.x, ja4 hash is only logged when requested */ + const char *ja4 = ConfNodeLookupChildValue(conf, "ja4"); + if (ja4 && ConfValIsTrue(ja4)) { + tls_ctx->fields = LOG_TLS_FIELD_JA4; + } + const char *session_resumption = ConfNodeLookupChildValue(conf, "session-resumption"); if (session_resumption == NULL || ConfValIsTrue(session_resumption)) { tls_ctx->flags |= LOG_TLS_SESSION_RESUMPTION; diff --git a/src/output-json-tls.h b/src/output-json-tls.h index 737e6233ef10..4988abc6d42c 100644 --- a/src/output-json-tls.h +++ b/src/output-json-tls.h @@ -29,6 
+29,6 @@ void JsonTlsLogRegister(void); #include "app-layer-ssl.h" void JsonTlsLogJSONBasic(JsonBuilder *js, SSLState *ssl_state); -void JsonTlsLogJSONExtended(JsonBuilder *js, SSLState *ssl_state); +void JsonTlsLogJSONExtended(JsonBuilder *js, SSLState *ssl_state, bool log_ja4); #endif /* __OUTPUT_JSON_TLS_H__ */ diff --git a/src/output-json.c b/src/output-json.c index 5d4255cd2897..6e5ff238d7a5 100644 --- a/src/output-json.c +++ b/src/output-json.c @@ -79,7 +79,8 @@ static void OutputJsonDeInitCtx(OutputCtx *); static void CreateEveCommunityFlowId(JsonBuilder *js, const Flow *f, const uint16_t seed); -static int CreateJSONEther(JsonBuilder *parent, const Packet *p, const Flow *f); +static int CreateJSONEther( + JsonBuilder *parent, const Packet *p, const Flow *f, enum OutputJsonLogDirection dir); static const char *TRAFFIC_ID_PREFIX = "traffic/id/"; static const char *TRAFFIC_LABEL_PREFIX = "traffic/label/"; @@ -412,14 +413,14 @@ void EveAddMetadata(const Packet *p, const Flow *f, JsonBuilder *js) } } -void EveAddCommonOptions(const OutputJsonCommonSettings *cfg, - const Packet *p, const Flow *f, JsonBuilder *js) +void EveAddCommonOptions(const OutputJsonCommonSettings *cfg, const Packet *p, const Flow *f, + JsonBuilder *js, enum OutputJsonLogDirection dir) { if (cfg->include_metadata) { EveAddMetadata(p, f, js); } if (cfg->include_ethernet) { - CreateJSONEther(js, p, f); + CreateJSONEther(js, p, f, dir); } if (cfg->include_community_id && f != NULL) { CreateEveCommunityFlowId(js, f, cfg->community_id_seed); @@ -444,7 +445,13 @@ void EvePacket(const Packet *p, JsonBuilder *js, unsigned long max_length) if (!jb_open_object(js, "packet_info")) { return; } + /* + * ensure the object is closed on error. 
This is done defensively + * in case additional logic is added before the final jb_close() + * invocation + */ if (!jb_set_uint(js, "linktype", p->datalink)) { + jb_close(js); return; } jb_close(js); @@ -742,14 +749,42 @@ static int MacSetIterateToJSON(uint8_t *val, MacSetSide side, void *data) return 0; } -static int CreateJSONEther(JsonBuilder *js, const Packet *p, const Flow *f) +static int CreateJSONEther( + JsonBuilder *js, const Packet *p, const Flow *f, enum OutputJsonLogDirection dir) { if (p != NULL) { /* this is a packet context, so we need to add scalar fields */ if (p->ethh != NULL) { jb_open_object(js, "ether"); - uint8_t *src = p->ethh->eth_src; - uint8_t *dst = p->ethh->eth_dst; + uint8_t *src; + uint8_t *dst; + switch (dir) { + case LOG_DIR_FLOW_TOSERVER: + // fallthrough + case LOG_DIR_FLOW: + if (PKT_IS_TOCLIENT(p)) { + src = p->ethh->eth_dst; + dst = p->ethh->eth_src; + } else { + src = p->ethh->eth_src; + dst = p->ethh->eth_dst; + } + break; + case LOG_DIR_FLOW_TOCLIENT: + if (PKT_IS_TOSERVER(p)) { + src = p->ethh->eth_dst; + dst = p->ethh->eth_src; + } else { + src = p->ethh->eth_src; + dst = p->ethh->eth_dst; + } + break; + case LOG_DIR_PACKET: + default: + src = p->ethh->eth_src; + dst = p->ethh->eth_dst; + break; + } JSONFormatAndAddMACAddr(js, "src_mac", src, false); JSONFormatAndAddMACAddr(js, "dest_mac", dst, false); jb_close(js); @@ -773,8 +808,15 @@ static int CreateJSONEther(JsonBuilder *js, const Packet *p, const Flow *f) } jb_close(info.dst); jb_close(info.src); - jb_set_object(js, "dest_macs", info.dst); - jb_set_object(js, "src_macs", info.src); + /* case is handling netflow too so may need to revert */ + if (dir == LOG_DIR_FLOW_TOCLIENT) { + jb_set_object(js, "dest_macs", info.src); + jb_set_object(js, "src_macs", info.dst); + } else { + DEBUG_VALIDATE_BUG_ON(dir != LOG_DIR_FLOW_TOSERVER && dir != LOG_DIR_FLOW); + jb_set_object(js, "dest_macs", info.dst); + jb_set_object(js, "src_macs", info.src); + } jb_free(info.dst); 
jb_free(info.src); jb_close(js); @@ -863,7 +905,7 @@ JsonBuilder *CreateEveHeader(const Packet *p, enum OutputJsonLogDirection dir, jb_set_string(js, "pkt_src", PktSrcToString(p->pkt_src)); if (eve_ctx != NULL) { - EveAddCommonOptions(&eve_ctx->cfg, p, f, js); + EveAddCommonOptions(&eve_ctx->cfg, p, f, js, dir); } return js; diff --git a/src/output-json.h b/src/output-json.h index 6fe6c5898d74..74d07bb8a2c8 100644 --- a/src/output-json.h +++ b/src/output-json.h @@ -110,8 +110,8 @@ OutputInitResult OutputJsonLogInitSub(ConfNode *conf, OutputCtx *parent_ctx); TmEcode JsonLogThreadInit(ThreadVars *t, const void *initdata, void **data); TmEcode JsonLogThreadDeinit(ThreadVars *t, void *data); -void EveAddCommonOptions(const OutputJsonCommonSettings *cfg, - const Packet *p, const Flow *f, JsonBuilder *js); +void EveAddCommonOptions(const OutputJsonCommonSettings *cfg, const Packet *p, const Flow *f, + JsonBuilder *js, enum OutputJsonLogDirection dir); void EveAddMetadata(const Packet *p, const Flow *f, JsonBuilder *js); int OutputJSONMemBufferCallback(const char *str, size_t size, void *data); diff --git a/src/output-lua.c b/src/output-lua.c index cc93a2de4a54..d2b708d97572 100644 --- a/src/output-lua.c +++ b/src/output-lua.c @@ -502,8 +502,10 @@ static int LuaScriptInit(const char *filename, LogLuaScriptOptions *options) { options->tcp_data = 1; else if (strcmp(k, "type") == 0 && strcmp(v, "stats") == 0) options->stats = 1; - else - SCLogInfo("unknown key and/or value: k='%s', v='%s'", k, v); + else { + SCLogError("unknown key and/or value: k='%s', v='%s'", k, v); + goto error; + } } if (((options->alproto != ALPROTO_UNKNOWN)) + options->packet + options->file > 1) { diff --git a/src/output-streaming.c b/src/output-streaming.c index 4aca9546d4a7..5baa4a36cac1 100644 --- a/src/output-streaming.c +++ b/src/output-streaming.c @@ -395,7 +395,7 @@ static TmEcode OutputStreamingLogThreadInit(ThreadVars *tv, const void *initdata tmp->next = ts; } - SCLogInfo("%s is now set 
up", logger->name); + SCLogDebug("%s is now set up", logger->name); } } diff --git a/src/output-tx.c b/src/output-tx.c index 18a34e78a734..042b424adec9 100644 --- a/src/output-tx.c +++ b/src/output-tx.c @@ -339,7 +339,9 @@ static TmEcode OutputTxLog(ThreadVars *tv, Packet *p, void *thread_data) DEBUG_VALIDATE_BUG_ON(thread_data == NULL); if (p->flow == NULL) return TM_ECODE_OK; - if (!((PKT_IS_PSEUDOPKT(p)) || p->flow->flags & (FLOW_TS_APP_UPDATED | FLOW_TC_APP_UPDATED))) { + if (!PKT_IS_PSEUDOPKT(p) && p->app_update_direction == 0 && + ((PKT_IS_TOSERVER(p) && (p->flow->flags & FLOW_TS_APP_UPDATED) == 0) || + (PKT_IS_TOCLIENT(p) && (p->flow->flags & FLOW_TC_APP_UPDATED) == 0))) { SCLogDebug("not pseudo, no app update: skip"); return TM_ECODE_OK; } diff --git a/src/packet.c b/src/packet.c index 40f3bdfcf385..3295764edf54 100644 --- a/src/packet.c +++ b/src/packet.c @@ -27,11 +27,11 @@ * * Set drop (+reject) flags in both current and root packet. * - * \param action action bit flags. Must be limited to ACTION_DROP_REJECT + * \param action action bit flags. 
Must be limited to ACTION_DROP_REJECT|ACTION_ALERT */ void PacketDrop(Packet *p, const uint8_t action, enum PacketDropReason r) { - DEBUG_VALIDATE_BUG_ON((action & ~ACTION_DROP_REJECT) != 0); + DEBUG_VALIDATE_BUG_ON((action & ~(ACTION_DROP_REJECT | ACTION_ALERT)) != 0); if (p->drop_reason == PKT_DROP_REASON_NOT_SET) p->drop_reason = (uint8_t)r; @@ -137,7 +137,6 @@ void PacketReinit(Packet *p) if (p->icmpv6h != NULL) { CLEAR_ICMPV6_PACKET(p); } - p->ppph = NULL; p->pppoesh = NULL; p->pppoedh = NULL; p->greh = NULL; diff --git a/src/reputation.c b/src/reputation.c index 75f3ba0c3fa4..dde42b09d381 100644 --- a/src/reputation.c +++ b/src/reputation.c @@ -124,31 +124,31 @@ static void SRepCIDRAddNetblock(SRepCIDRTree *cidr_ctx, char *ip, int cat, uint8 } } -static uint8_t SRepCIDRGetIPv4IPRep(SRepCIDRTree *cidr_ctx, uint8_t *ipv4_addr, uint8_t cat) +static int8_t SRepCIDRGetIPv4IPRep(SRepCIDRTree *cidr_ctx, uint8_t *ipv4_addr, uint8_t cat) { void *user_data = NULL; (void)SCRadixFindKeyIPV4BestMatch(ipv4_addr, cidr_ctx->srepIPV4_tree[cat], &user_data); if (user_data == NULL) - return 0; + return -1; SReputation *r = (SReputation *)user_data; return r->rep[cat]; } -static uint8_t SRepCIDRGetIPv6IPRep(SRepCIDRTree *cidr_ctx, uint8_t *ipv6_addr, uint8_t cat) +static int8_t SRepCIDRGetIPv6IPRep(SRepCIDRTree *cidr_ctx, uint8_t *ipv6_addr, uint8_t cat) { void *user_data = NULL; (void)SCRadixFindKeyIPV6BestMatch(ipv6_addr, cidr_ctx->srepIPV6_tree[cat], &user_data); if (user_data == NULL) - return 0; + return -1; SReputation *r = (SReputation *)user_data; return r->rep[cat]; } -uint8_t SRepCIDRGetIPRepSrc(SRepCIDRTree *cidr_ctx, Packet *p, uint8_t cat, uint32_t version) +int8_t SRepCIDRGetIPRepSrc(SRepCIDRTree *cidr_ctx, Packet *p, uint8_t cat, uint32_t version) { - uint8_t rep = 0; + int8_t rep = -3; if (PKT_IS_IPV4(p)) rep = SRepCIDRGetIPv4IPRep(cidr_ctx, (uint8_t *)GET_IPV4_SRC_ADDR_PTR(p), cat); @@ -158,9 +158,9 @@ uint8_t SRepCIDRGetIPRepSrc(SRepCIDRTree *cidr_ctx, Packet 
*p, uint8_t cat, uint return rep; } -uint8_t SRepCIDRGetIPRepDst(SRepCIDRTree *cidr_ctx, Packet *p, uint8_t cat, uint32_t version) +int8_t SRepCIDRGetIPRepDst(SRepCIDRTree *cidr_ctx, Packet *p, uint8_t cat, uint32_t version) { - uint8_t rep = 0; + int8_t rep = -3; if (PKT_IS_IPV4(p)) rep = SRepCIDRGetIPv4IPRep(cidr_ctx, (uint8_t *)GET_IPV4_DST_ADDR_PTR(p), cat); @@ -587,7 +587,6 @@ int SRepInit(DetectEngineCtx *de_ctx) ConfNode *file = NULL; const char *filename = NULL; int init = 0; - int i = 0; de_ctx->srepCIDR_ctx = (SRepCIDRTree *)SCMalloc(sizeof(SRepCIDRTree)); if (de_ctx->srepCIDR_ctx == NULL) @@ -595,11 +594,6 @@ int SRepInit(DetectEngineCtx *de_ctx) memset(de_ctx->srepCIDR_ctx, 0, sizeof(SRepCIDRTree)); SRepCIDRTree *cidr_ctx = de_ctx->srepCIDR_ctx; - for (i = 0; i < SREP_MAX_CATS; i++) { - cidr_ctx->srepIPV4_tree[i] = NULL; - cidr_ctx->srepIPV6_tree[i] = NULL; - } - if (SRepGetVersion() == 0) { SC_ATOMIC_INIT(srep_eversion); init = 1; diff --git a/src/reputation.h b/src/reputation.h index 3ed94d985159..9f14adee65ee 100644 --- a/src/reputation.h +++ b/src/reputation.h @@ -49,8 +49,8 @@ void SRepDestroy(struct DetectEngineCtx_ *de_ctx); void SRepReloadComplete(void); int SRepHostTimedOut(Host *); -uint8_t SRepCIDRGetIPRepSrc(SRepCIDRTree *cidr_ctx, Packet *p, uint8_t cat, uint32_t version); -uint8_t SRepCIDRGetIPRepDst(SRepCIDRTree *cidr_ctx, Packet *p, uint8_t cat, uint32_t version); +int8_t SRepCIDRGetIPRepSrc(SRepCIDRTree *cidr_ctx, Packet *p, uint8_t cat, uint32_t version); +int8_t SRepCIDRGetIPRepDst(SRepCIDRTree *cidr_ctx, Packet *p, uint8_t cat, uint32_t version); void SRepResetVersion(void); int SRepLoadCatFileFromFD(FILE *fp); int SRepLoadFileFromFD(SRepCIDRTree *cidr_ctx, FILE *fp); diff --git a/src/runmode-af-packet.c b/src/runmode-af-packet.c index b8ad0bfac0c3..742d96855bb7 100644 --- a/src/runmode-af-packet.c +++ b/src/runmode-af-packet.c @@ -90,7 +90,6 @@ static int AFPRunModeIsIPS(void) SCLogError("Problem with config file"); return 0; } - 
const char *copymodestr = NULL; if_root = ConfFindDeviceConfig(af_packet_node, live_dev); if (if_root == NULL) { @@ -101,7 +100,10 @@ static int AFPRunModeIsIPS(void) if_root = if_default; } - if (ConfGetChildValueWithDefault(if_root, if_default, "copy-mode", ©modestr) == 1) { + const char *copymodestr = NULL; + const char *copyifacestr = NULL; + if (ConfGetChildValueWithDefault(if_root, if_default, "copy-mode", ©modestr) == 1 && + ConfGetChildValue(if_root, "copy-iface", ©ifacestr) == 1) { if (strcmp(copymodestr, "ips") == 0) { has_ips = 1; } else { diff --git a/src/runmode-dpdk.c b/src/runmode-dpdk.c index 2cdf5cb32505..e9642a9beb08 100644 --- a/src/runmode-dpdk.c +++ b/src/runmode-dpdk.c @@ -54,9 +54,12 @@ #define RSS_HKEY_LEN 40 // General purpose RSS key for symmetric bidirectional flow distribution -uint8_t rss_hkey[] = { 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, - 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, - 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A }; +uint8_t rss_hkey[] = { + 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, + 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, + 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, // 40 + 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, 0x6D, 0x5A, // 52 +}; // Calculates the closest multiple of y from x #define ROUNDUP(x, y) ((((x) + ((y)-1)) / (y)) * (y)) @@ -111,6 +114,7 @@ static void *ParseDpdkConfigAndConfigureDevice(const char *iface); static void DPDKDerefConfig(void *conf); #define DPDK_CONFIG_DEFAULT_THREADS "auto" +#define DPDK_CONFIG_DEFAULT_INTERRUPT_MODE false #define DPDK_CONFIG_DEFAULT_MEMPOOL_SIZE 65535 #define DPDK_CONFIG_DEFAULT_MEMPOOL_CACHE_SIZE "auto" #define DPDK_CONFIG_DEFAULT_RX_DESCRIPTORS 1024 @@ -126,6 +130,7 @@ static void DPDKDerefConfig(void *conf); 
DPDKIfaceConfigAttributes dpdk_yaml = { .threads = "threads", + .irq_mode = "interrupt-mode", .promisc = "promisc", .multicast = "multicast", .checksum_checks = "checksum-checks", @@ -300,7 +305,6 @@ static void InitEal(void) if (retval < 0) { // retval bound to the result of rte_eal_init FatalError("DPDK EAL initialization error: %s", rte_strerror(-retval)); } - DPDKSetTimevalOfMachineStart(); } static void DPDKDerefConfig(void *conf) @@ -434,6 +438,15 @@ static int ConfigSetThreads(DPDKIfaceConfig *iconf, const char *entry_str) SCReturnInt(0); } +static bool ConfigSetInterruptMode(DPDKIfaceConfig *iconf, bool enable) +{ + SCEnter(); + if (enable) + iconf->flags |= DPDK_IRQ_MODE; + + SCReturnBool(true); +} + static int ConfigSetRxQueues(DPDKIfaceConfig *iconf, uint16_t nb_queues) { SCEnter(); @@ -464,6 +477,9 @@ static int ConfigSetMempoolSize(DPDKIfaceConfig *iconf, intmax_t entry_int) if (entry_int <= 0) { SCLogError("%s: positive memory pool size is required", iconf->iface); SCReturnInt(-ERANGE); + } else if (entry_int > UINT32_MAX) { + SCLogError("%s: memory pool size cannot exceed %" PRIu32, iconf->iface, UINT32_MAX); + SCReturnInt(-ERANGE); } iconf->mempool_size = entry_int; @@ -484,7 +500,7 @@ static int ConfigSetMempoolCacheSize(DPDKIfaceConfig *iconf, const char *entry_s SCReturnInt(-EINVAL); } - uint32_t max_cache_size = MAX(RTE_MEMPOOL_CACHE_MAX_SIZE, iconf->mempool_size / 1.5); + uint32_t max_cache_size = MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, iconf->mempool_size / 1.5); iconf->mempool_cache_size = GreatestDivisorUpTo(iconf->mempool_size, max_cache_size); SCReturnInt(0); } @@ -510,6 +526,9 @@ static int ConfigSetRxDescriptors(DPDKIfaceConfig *iconf, intmax_t entry_int) if (entry_int <= 0) { SCLogError("%s: positive number of RX descriptors is required", iconf->iface); SCReturnInt(-ERANGE); + } else if (entry_int > UINT16_MAX) { + SCLogError("%s: number of RX descriptors cannot exceed %" PRIu16, iconf->iface, UINT16_MAX); + SCReturnInt(-ERANGE); } 
iconf->nb_rx_desc = entry_int; @@ -522,6 +541,9 @@ static int ConfigSetTxDescriptors(DPDKIfaceConfig *iconf, intmax_t entry_int) if (entry_int <= 0) { SCLogError("%s: positive number of TX descriptors is required", iconf->iface); SCReturnInt(-ERANGE); + } else if (entry_int > UINT16_MAX) { + SCLogError("%s: number of TX descriptors cannot exceed %" PRIu16, iconf->iface, UINT16_MAX); + SCReturnInt(-ERANGE); } iconf->nb_tx_desc = entry_int; @@ -695,6 +717,17 @@ static int ConfigLoad(DPDKIfaceConfig *iconf, const char *iface) if (retval < 0) SCReturnInt(retval); + bool irq_enable; + retval = ConfGetChildValueBoolWithDefault(if_root, if_default, dpdk_yaml.irq_mode, &entry_bool); + if (retval != 1) { + irq_enable = DPDK_CONFIG_DEFAULT_INTERRUPT_MODE; + } else { + irq_enable = entry_bool ? true : false; + } + retval = ConfigSetInterruptMode(iconf, irq_enable); + if (retval != true) + SCReturnInt(-EINVAL); + // currently only mapping "1 thread == 1 RX (and 1 TX queue in IPS mode)" is supported retval = ConfigSetRxQueues(iconf, (uint16_t)iconf->threads); if (retval < 0) @@ -837,7 +870,7 @@ static void DeviceSetPMDSpecificRSS(struct rte_eth_rss_conf *rss_conf, const cha if (strcmp(driver_name, "net_i40e") == 0) i40eDeviceSetRSSConf(rss_conf); if (strcmp(driver_name, "net_ice") == 0) - iceDeviceSetRSSHashFunction(&rss_conf->rss_hf); + iceDeviceSetRSSConf(rss_conf); if (strcmp(driver_name, "net_ixgbe") == 0) ixgbeDeviceSetRSSHashFunction(&rss_conf->rss_hf); if (strcmp(driver_name, "net_e1000_igb") == 0) @@ -1106,6 +1139,11 @@ static void DeviceInitPortConf(const DPDKIfaceConfig *iconf, }, }; + SCLogConfig("%s: interrupt mode is %s", iconf->iface, + iconf->flags & DPDK_IRQ_MODE ? 
"enabled" : "disabled"); + if (iconf->flags & DPDK_IRQ_MODE) + port_conf->intr_conf.rxq = 1; + // configure RX offloads if (dev_info->rx_offload_capa & RTE_ETH_RX_OFFLOAD_RSS_HASH) { if (iconf->nb_rx_queues > 1) { @@ -1424,12 +1462,17 @@ static int DeviceConfigure(DPDKIfaceConfig *iconf) if (retval < 0) return retval; + uint16_t tmp_nb_rx_desc = iconf->nb_rx_desc; + uint16_t tmp_nb_tx_desc = iconf->nb_tx_desc; retval = rte_eth_dev_adjust_nb_rx_tx_desc( iconf->port_id, &iconf->nb_rx_desc, &iconf->nb_tx_desc); if (retval != 0) { SCLogError("%s: failed to adjust device queue descriptors (port %u, err %d)", iconf->iface, iconf->port_id, retval); SCReturnInt(retval); + } else if (tmp_nb_rx_desc != iconf->nb_rx_desc || tmp_nb_tx_desc != iconf->nb_tx_desc) { + SCLogWarning("%s: device queue descriptors adjusted (RX: from %u to %u, TX: from %u to %u)", + iconf->iface, tmp_nb_rx_desc, iconf->nb_rx_desc, tmp_nb_tx_desc, iconf->nb_tx_desc); } retval = iconf->flags & DPDK_MULTICAST ? rte_eth_allmulticast_enable(iconf->port_id) @@ -1602,7 +1645,9 @@ static int DPDKRunModeIsIPS(void) } const char *copymodestr = NULL; - if (ConfGetChildValueWithDefault(if_root, if_default, "copy-mode", ©modestr) == 1) { + const char *copyifacestr = NULL; + if (ConfGetChildValueWithDefault(if_root, if_default, "copy-mode", ©modestr) == 1 && + ConfGetChildValue(if_root, "copy-iface", ©ifacestr) == 1) { if (strcmp(copymodestr, "ips") == 0) { has_ips = true; } else { diff --git a/src/runmode-dpdk.h b/src/runmode-dpdk.h index a00327ba9e24..152c1d687893 100644 --- a/src/runmode-dpdk.h +++ b/src/runmode-dpdk.h @@ -25,6 +25,7 @@ typedef struct DPDKIfaceConfigAttributes_ { const char *threads; + const char *irq_mode; const char *promisc; const char *multicast; const char *checksum_checks; diff --git a/src/runmode-napatech.c b/src/runmode-napatech.c index cb8f560ea350..fe0212437989 100644 --- a/src/runmode-napatech.c +++ b/src/runmode-napatech.c @@ -200,7 +200,12 @@ static void *NapatechConfigParser(const 
char *device) if (ConfGetInt("napatech.hba", &conf->hba) == 0) { conf->hba = -1; } else { - SCLogWarning("Napatech Host Buffer Allocation (hba) will be deprecated in Suricata v7.0."); + static bool warn_once = false; + if (!warn_once) { + SCLogWarning( + "Napatech Host Buffer Allowance (hba) will be deprecated in Suricata v8.0."); + warn_once = true; + } } return (void *) conf; } diff --git a/src/runmode-netmap.c b/src/runmode-netmap.c index 927dc718856c..947b381229d2 100644 --- a/src/runmode-netmap.c +++ b/src/runmode-netmap.c @@ -81,7 +81,6 @@ static int NetmapRunModeIsIPS(void) SCLogError("Problem with config file"); return 0; } - const char *copymodestr = NULL; if_root = ConfNodeLookupKeyValue(netmap_node, "interface", live_dev); if (if_root == NULL) { @@ -92,7 +91,10 @@ static int NetmapRunModeIsIPS(void) if_root = if_default; } - if (ConfGetChildValueWithDefault(if_root, if_default, "copy-mode", ©modestr) == 1) { + const char *copymodestr = NULL; + const char *copyifacestr = NULL; + if (ConfGetChildValueWithDefault(if_root, if_default, "copy-mode", ©modestr) == 1 && + ConfGetChildValue(if_root, "copy-iface", ©ifacestr) == 1) { if (strcmp(copymodestr, "ips") == 0) { has_ips = 1; } else { @@ -344,7 +346,9 @@ static void *ParseNetmapConfig(const char *iface_name) } } - int ring_count = NetmapGetRSSCount(aconf->iface_name); + int ring_count = 0; + if (aconf->in.real) + ring_count = NetmapGetRSSCount(aconf->iface_name); if (strlen(aconf->iface_name) > 0 && (aconf->iface_name[strlen(aconf->iface_name) - 1] == '^' || aconf->iface_name[strlen(aconf->iface_name) - 1] == '*')) { diff --git a/src/runmode-pfring.c b/src/runmode-pfring.c index b0af83b4bfc2..7f1f74f40fda 100644 --- a/src/runmode-pfring.c +++ b/src/runmode-pfring.c @@ -200,6 +200,7 @@ static void *ParsePfringConfig(const char *iface) cluster_type default_ctype = CLUSTER_FLOW; int getctype = 0; int bool_val; + const char *active_runmode = RunmodeGetActive(); if (unlikely(pfconf == NULL)) { return NULL; @@ 
-244,7 +245,9 @@ static void *ParsePfringConfig(const char *iface) if_default = NULL; } - if (ConfGetChildValueWithDefault(if_root, if_default, "threads", &threadsstr) != 1) { + if (active_runmode && !strcmp("single", active_runmode)) { + pfconf->threads = 1; + } else if (ConfGetChildValueWithDefault(if_root, if_default, "threads", &threadsstr) != 1) { pfconf->threads = 1; } else if (threadsstr != NULL) { if (strcmp(threadsstr, "auto") == 0) { diff --git a/src/runmode-unittests.c b/src/runmode-unittests.c index 1150bad89580..8ce0244146a3 100644 --- a/src/runmode-unittests.c +++ b/src/runmode-unittests.c @@ -114,6 +114,8 @@ #include "decode-vntag.h" #include "decode-vxlan.h" +#include "output-json-stats.h" + #ifdef OS_WIN32 #include "win32-syscall.h" #endif @@ -215,6 +217,7 @@ static void RegisterUnittests(void) #endif SCProtoNameRegisterTests(); UtilCIDRTests(); + OutputJsonStatsRegisterTests(); } #endif diff --git a/src/runmode-unix-socket.c b/src/runmode-unix-socket.c index e695cb8dfbd6..20b25f25f9cd 100644 --- a/src/runmode-unix-socket.c +++ b/src/runmode-unix-socket.c @@ -545,7 +545,7 @@ static TmEcode UnixSocketPcapFilesCheck(void *data) if (cfile->tenant_id > 0) { char tstr[16]; - snprintf(tstr, sizeof(tstr), "%d", cfile->tenant_id); + snprintf(tstr, sizeof(tstr), "%u", cfile->tenant_id); if (ConfSetFinal("pcap-file.tenant-id", tstr) != 1) { SCLogError("Can not set working tenant-id to '%s'", tstr); PcapFilesFree(cfile); @@ -1038,7 +1038,7 @@ TmEcode UnixSocketRegisterTenant(json_t *cmd, json_t* answer, void *data) /* setup the yaml in this loop so that it's not done by the loader * threads. ConfYamlLoadFileWithPrefix is not thread safe. 
*/ char prefix[64]; - snprintf(prefix, sizeof(prefix), "multi-detect.%d", tenant_id); + snprintf(prefix, sizeof(prefix), "multi-detect.%u", tenant_id); if (ConfYamlLoadFileWithPrefix(filename, prefix) != 0) { SCLogError("failed to load yaml %s", filename); json_object_set_new(answer, "message", json_string("failed to load yaml")); @@ -1187,7 +1187,7 @@ TmEcode UnixSocketUnregisterTenant(json_t *cmd, json_t* answer, void *data) /* 2 remove it from the system */ char prefix[64]; - snprintf(prefix, sizeof(prefix), "multi-detect.%d", tenant_id); + snprintf(prefix, sizeof(prefix), "multi-detect.%u", tenant_id); DetectEngineCtx *de_ctx = DetectEngineGetByTenantId(tenant_id); if (de_ctx == NULL) { @@ -1280,7 +1280,7 @@ TmEcode UnixSocketHostbitAdd(json_t *cmd, json_t* answer, void *data_usused) Host *host = HostGetHostFromHash(&a); if (host) { HostBitSet(host, idx, SCTIME_SECS(current_time) + expire); - HostUnlock(host); + HostRelease(host); json_object_set_new(answer, "message", json_string("hostbit added")); return TM_ECODE_OK; @@ -1349,7 +1349,7 @@ TmEcode UnixSocketHostbitRemove(json_t *cmd, json_t* answer, void *data_unused) Host *host = HostLookupHostFromHash(&a); if (host) { HostBitUnset(host, idx); - HostUnlock(host); + HostRelease(host); json_object_set_new(answer, "message", json_string("hostbit removed")); return TM_ECODE_OK; } else { @@ -1426,7 +1426,7 @@ TmEcode UnixSocketHostbitList(json_t *cmd, json_t* answer, void *data_unused) bits[use].expire = iter->expire; use++; } - HostUnlock(host); + HostRelease(host); json_t *jdata = json_object(); json_t *jarray = json_array(); diff --git a/src/runmodes.c b/src/runmodes.c index 348adfa25d43..d4b57df5e8bf 100644 --- a/src/runmodes.c +++ b/src/runmodes.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2022 Open Information Security Foundation +/* Copyright (C) 2007-2024 Open Information Security Foundation * * You can copy, redistribute or modify this Program under the terms of * the GNU General Public License version 2 as 
published by the Free @@ -70,6 +70,7 @@ #include "counters.h" #include "suricata-plugin.h" +#include "util-device.h" int debuglog_enabled = 0; int threading_set_cpu_affinity = FALSE; @@ -408,6 +409,14 @@ void RunModeEngineIsIPS(int capture_mode, const char *runmode, const char *captu if (mode->RunModeIsIPSEnabled != NULL) { mode->RunModeIsIPSEnabled(); + + if (EngineModeIsIPS()) { + extern uint16_t g_livedev_mask; + if (g_livedev_mask != 0 && LiveGetDeviceCount() > 0) { + SCLogWarning("disabling livedev.use-for-tracking with IPS mode. See ticket #6726."); + g_livedev_mask = 0; + } + } } } diff --git a/src/source-dpdk.c b/src/source-dpdk.c index 54503e212271..d6d5b9c43145 100644 --- a/src/source-dpdk.c +++ b/src/source-dpdk.c @@ -91,8 +91,14 @@ TmEcode NoDPDKSupportExit(ThreadVars *tv, const void *initdata, void **data) #include "util-dpdk-bonding.h" #include -#define BURST_SIZE 32 -static struct timeval machine_start_time = { 0, 0 }; +#define BURST_SIZE 32 +// interrupt mode constants +#define MIN_ZERO_POLL_COUNT 10U +#define MIN_ZERO_POLL_COUNT_TO_SLEEP 10U +#define MINIMUM_SLEEP_TIME_US 1U +#define STANDARD_SLEEP_TIME_US 100U +#define MAX_EPOLL_TIMEOUT_MS 500U +static rte_spinlock_t intr_lock[RTE_MAX_ETHPORTS]; /** * \brief Structure to hold thread specific variables. 
@@ -104,6 +110,7 @@ typedef struct DPDKThreadVars_ { TmSlot *slot; LiveDevice *livedev; ChecksumValidationMode checksum_mode; + bool intr_enabled; /* references to packet and drop counters */ uint16_t capture_dpdk_packets; uint16_t capture_dpdk_rx_errs; @@ -137,67 +144,46 @@ static TmEcode DecodeDPDKThreadInit(ThreadVars *, const void *, void **); static TmEcode DecodeDPDKThreadDeinit(ThreadVars *tv, void *data); static TmEcode DecodeDPDK(ThreadVars *, Packet *, void *); -static uint64_t CyclesToMicroseconds(uint64_t cycles); -static uint64_t CyclesToSeconds(uint64_t cycles); static void DPDKFreeMbufArray(struct rte_mbuf **mbuf_array, uint16_t mbuf_cnt, uint16_t offset); -static uint64_t DPDKGetSeconds(void); - -static void DPDKFreeMbufArray(struct rte_mbuf **mbuf_array, uint16_t mbuf_cnt, uint16_t offset) +static bool InterruptsRXEnable(uint16_t port_id, uint16_t queue_id) { - for (int i = offset; i < mbuf_cnt; i++) { - rte_pktmbuf_free(mbuf_array[i]); + uint32_t event_data = port_id << UINT16_WIDTH | queue_id; + int32_t ret = rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD, + RTE_INTR_EVENT_ADD, (void *)((uintptr_t)event_data)); + + if (ret != 0) { + SCLogError("%s-Q%d: failed to enable interrupt mode: %s", DPDKGetPortNameByPortID(port_id), + queue_id, rte_strerror(-ret)); + return false; } + return true; } -static uint64_t CyclesToMicroseconds(const uint64_t cycles) +static inline uint32_t InterruptsSleepHeuristic(uint32_t no_pkt_polls_count) { - const uint64_t ticks_per_us = rte_get_tsc_hz() / 1000000; - if (ticks_per_us == 0) { - return 0; - } - return cycles / ticks_per_us; -} + if (no_pkt_polls_count < MIN_ZERO_POLL_COUNT_TO_SLEEP) + return MINIMUM_SLEEP_TIME_US; -static uint64_t CyclesToSeconds(const uint64_t cycles) -{ - const uint64_t ticks_per_s = rte_get_tsc_hz(); - if (ticks_per_s == 0) { - return 0; - } - return cycles / ticks_per_s; + return STANDARD_SLEEP_TIME_US; } -static void CyclesAddToTimeval( - const uint64_t cycles, struct 
timeval *orig_tv, struct timeval *new_tv) +static inline void InterruptsTurnOnOff(uint16_t port_id, uint16_t queue_id, bool on) { - uint64_t usec = CyclesToMicroseconds(cycles) + orig_tv->tv_usec; - new_tv->tv_sec = orig_tv->tv_sec + usec / 1000000; - new_tv->tv_usec = (usec % 1000000); -} + rte_spinlock_lock(&(intr_lock[port_id])); -void DPDKSetTimevalOfMachineStart(void) -{ - gettimeofday(&machine_start_time, NULL); - machine_start_time.tv_sec -= DPDKGetSeconds(); -} + if (on) + rte_eth_dev_rx_intr_enable(port_id, queue_id); + else + rte_eth_dev_rx_intr_disable(port_id, queue_id); -/** - * Initializes real_tv to the correct real time. Adds TSC counter value to the timeval of - * the machine start - * @param machine_start_tv - timestamp when the machine was started - * @param real_tv - */ -static SCTime_t DPDKSetTimevalReal(struct timeval *machine_start_tv) -{ - struct timeval real_tv; - CyclesAddToTimeval(rte_get_tsc_cycles(), machine_start_tv, &real_tv); - return SCTIME_FROM_TIMEVAL(&real_tv); + rte_spinlock_unlock(&(intr_lock[port_id])); } -/* get number of seconds from the reset of TSC counter (typically from the machine start) */ -static uint64_t DPDKGetSeconds(void) +static void DPDKFreeMbufArray(struct rte_mbuf **mbuf_array, uint16_t mbuf_cnt, uint16_t offset) { - return CyclesToSeconds(rte_get_tsc_cycles()); + for (int i = offset; i < mbuf_cnt; i++) { + rte_pktmbuf_free(mbuf_array[i]); + } } static void DevicePostStartPMDSpecificActions(DPDKThreadVars *ptv, const char *driver_name) @@ -359,10 +345,10 @@ static TmEcode ReceiveDPDKLoop(ThreadVars *tv, void *data, void *slot) SCEnter(); Packet *p; uint16_t nb_rx; - time_t last_dump = 0; - time_t current_time; + SCTime_t last_dump = { 0 }; + SCTime_t current_time; bool segmented_mbufs_warned = 0; - SCTime_t t = DPDKSetTimevalReal(&machine_start_time); + SCTime_t t = TimeGet(); uint64_t last_timeout_msec = SCTIME_MSECS(t); DPDKThreadVars *ptv = (DPDKThreadVars *)data; @@ -377,6 +363,11 @@ static TmEcode 
ReceiveDPDKLoop(ThreadVars *tv, void *data, void *slot) rte_eth_stats_reset(ptv->port_id); rte_eth_xstats_reset(ptv->port_id); + + uint32_t pwd_zero_rx_packet_polls_count = 0; + if (ptv->intr_enabled && !InterruptsRXEnable(ptv->port_id, ptv->queue_id)) + SCReturnInt(TM_ECODE_FAILED); + while (1) { if (unlikely(suricata_ctl_flags != 0)) { SCLogDebug("Stopping Suricata!"); @@ -392,13 +383,33 @@ static TmEcode ReceiveDPDKLoop(ThreadVars *tv, void *data, void *slot) nb_rx = rte_eth_rx_burst(ptv->port_id, ptv->queue_id, ptv->received_mbufs, BURST_SIZE); if (unlikely(nb_rx == 0)) { - t = DPDKSetTimevalReal(&machine_start_time); + t = TimeGet(); uint64_t msecs = SCTIME_MSECS(t); if (msecs > last_timeout_msec + 100) { TmThreadsCaptureHandleTimeout(tv, NULL); last_timeout_msec = msecs; } - continue; + + if (!ptv->intr_enabled) + continue; + + pwd_zero_rx_packet_polls_count++; + if (pwd_zero_rx_packet_polls_count <= MIN_ZERO_POLL_COUNT) + continue; + + uint32_t pwd_idle_hint = InterruptsSleepHeuristic(pwd_zero_rx_packet_polls_count); + + if (pwd_idle_hint < STANDARD_SLEEP_TIME_US) { + rte_delay_us(pwd_idle_hint); + } else { + InterruptsTurnOnOff(ptv->port_id, ptv->queue_id, true); + struct rte_epoll_event event; + rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, MAX_EPOLL_TIMEOUT_MS); + InterruptsTurnOnOff(ptv->port_id, ptv->queue_id, false); + continue; + } + } else if (ptv->intr_enabled && pwd_zero_rx_packet_polls_count) { + pwd_zero_rx_packet_polls_count = 0; } ptv->pkts += (uint64_t)nb_rx; @@ -413,7 +424,7 @@ static TmEcode ReceiveDPDKLoop(ThreadVars *tv, void *data, void *slot) p->flags |= PKT_IGNORE_CHECKSUM; } - p->ts = DPDKSetTimevalReal(&machine_start_time); + p->ts = TimeGet(); p->dpdk_v.mbuf = ptv->received_mbufs[i]; p->ReleasePacket = DPDKReleasePacket; p->dpdk_v.copy_mode = ptv->copy_mode; @@ -469,8 +480,8 @@ static TmEcode ReceiveDPDKLoop(ThreadVars *tv, void *data, void *slot) } /* Trigger one dump of stats every second */ - current_time = DPDKGetSeconds(); - 
if (current_time != last_dump) { + current_time = TimeGet(); + if (current_time.secs != last_dump.secs) { DPDKDumpCounters(ptv); last_dump = current_time; } @@ -522,6 +533,7 @@ static TmEcode ReceiveDPDKThreadInit(ThreadVars *tv, const void *initdata, void ptv->checksum_mode = dpdk_config->checksum_mode; ptv->threads = dpdk_config->threads; + ptv->intr_enabled = (dpdk_config->flags & DPDK_IRQ_MODE) ? true : false; ptv->port_id = dpdk_config->port_id; ptv->out_port_id = dpdk_config->out_port_id; ptv->port_socket_id = dpdk_config->socket_id; @@ -564,11 +576,14 @@ static TmEcode ReceiveDPDKThreadInit(ThreadVars *tv, const void *initdata, void if (inconsistent_numa_cnt > 0 && ptv->port_socket_id != SOCKET_ID_ANY) { SCLogWarning("%s: NIC is on NUMA %d, %u threads on different NUMA node(s)", dpdk_config->iface, ptv->port_socket_id, inconsistent_numa_cnt); - } else if (ptv->port_socket_id == SOCKET_ID_ANY) { + } else if (ptv->port_socket_id == SOCKET_ID_ANY && rte_socket_count() > 1) { SCLogNotice( "%s: unable to determine NIC's NUMA node, degraded performance can be expected", dpdk_config->iface); } + if (ptv->intr_enabled) { + rte_spinlock_init(&intr_lock[ptv->port_id]); + } } *data = (void *)ptv; diff --git a/src/source-dpdk.h b/src/source-dpdk.h index 3fdb63cb35d9..edc19ec917d0 100644 --- a/src/source-dpdk.h +++ b/src/source-dpdk.h @@ -38,10 +38,10 @@ typedef enum { DPDK_COPY_MODE_NONE, DPDK_COPY_MODE_TAP, DPDK_COPY_MODE_IPS } Dpd // General flags #define DPDK_PROMISC (1 << 0) /**< Promiscuous mode */ #define DPDK_MULTICAST (1 << 1) /**< Enable multicast packets */ +#define DPDK_IRQ_MODE (1 << 2) /**< Interrupt mode */ // Offloads #define DPDK_RX_CHECKSUM_OFFLOAD (1 << 4) /**< Enable chsum offload */ -void DPDKSetTimevalOfMachineStart(void); typedef struct DPDKIfaceConfig_ { #ifdef HAVE_DPDK char iface[RTE_ETH_NAME_MAX_LEN]; diff --git a/src/source-erf-dag.c b/src/source-erf-dag.c index b1a8286360cc..ee1e14b84c89 100644 --- a/src/source-erf-dag.c +++ 
b/src/source-erf-dag.c @@ -118,10 +118,10 @@ static inline TmEcode ProcessErfDagRecords(ErfDagThreadVars *ewtn, uint8_t *top, uint32_t *pkts_read); static inline TmEcode ProcessErfDagRecord(ErfDagThreadVars *ewtn, char *prec); TmEcode ReceiveErfDagLoop(ThreadVars *, void *data, void *slot); -TmEcode ReceiveErfDagThreadInit(ThreadVars *, void *, void **); +TmEcode ReceiveErfDagThreadInit(ThreadVars *, const void *, void **); void ReceiveErfDagThreadExitStats(ThreadVars *, void *); TmEcode ReceiveErfDagThreadDeinit(ThreadVars *, void *); -TmEcode DecodeErfDagThreadInit(ThreadVars *, void *, void **); +TmEcode DecodeErfDagThreadInit(ThreadVars *, const void *, void **); TmEcode DecodeErfDagThreadDeinit(ThreadVars *tv, void *data); TmEcode DecodeErfDag(ThreadVars *, Packet *, void *); void ReceiveErfDagCloseStream(int dagfd, int stream); @@ -175,8 +175,7 @@ TmModuleDecodeErfDagRegister(void) * \param data data pointer gets populated with * */ -TmEcode -ReceiveErfDagThreadInit(ThreadVars *tv, void *initdata, void **data) +TmEcode ReceiveErfDagThreadInit(ThreadVars *tv, const void *initdata, void **data) { SCEnter(); int stream_count = 0; @@ -186,7 +185,7 @@ ReceiveErfDagThreadInit(ThreadVars *tv, void *initdata, void **data) SCReturnInt(TM_ECODE_FAILED); } - ErfDagThreadVars *ewtn = SCMalloc(sizeof(ErfDagThreadVars)); + ErfDagThreadVars *ewtn = SCCalloc(1, sizeof(ErfDagThreadVars)); if (unlikely(ewtn == NULL)) { FatalError("Failed to allocate memory for ERF DAG thread vars."); } @@ -198,14 +197,14 @@ ReceiveErfDagThreadInit(ThreadVars *tv, void *initdata, void **data) */ if (dag_parse_name(initdata, ewtn->dagname, DAGNAME_BUFSIZE, &ewtn->dagstream) < 0) { - SCLogError("Failed to parse DAG interface: %s", (char *)initdata); + SCLogError("Failed to parse DAG interface: %s", (const char *)initdata); SCFree(ewtn); exit(EXIT_FAILURE); } ewtn->livedev = LiveGetDevice(initdata); if (ewtn->livedev == NULL) { - SCLogError("Unable to get %s live device", (char *)initdata); + 
SCLogError("Unable to get %s live device", (const char *)initdata); SCFree(ewtn); SCReturnInt(TM_ECODE_FAILED); } @@ -508,17 +507,13 @@ ProcessErfDagRecord(ErfDagThreadVars *ewtn, char *prec) SCReturnInt(TM_ECODE_FAILED); } - /* Convert ERF time to timeval - from libpcap. */ + /* Convert ERF time to SCTime_t */ uint64_t ts = dr->ts; p->ts = SCTIME_FROM_SECS(ts >> 32); ts = (ts & 0xffffffffULL) * 1000000; ts += 0x80000000; /* rounding */ uint64_t usecs = ts >> 32; - if (usecs >= 1000000) { - usecs -= 1000000; - p->ts += SCTIME_FROM_SECS(1); - } - p->ts += SCTIME_FROM_USECS(usecs); + p->ts = SCTIME_ADD_USECS(p->ts, usecs); StatsIncr(ewtn->tv, ewtn->packets); ewtn->bytes += wlen; @@ -618,8 +613,7 @@ DecodeErfDag(ThreadVars *tv, Packet *p, void *data) SCReturnInt(TM_ECODE_OK); } -TmEcode -DecodeErfDagThreadInit(ThreadVars *tv, void *initdata, void **data) +TmEcode DecodeErfDagThreadInit(ThreadVars *tv, const void *initdata, void **data) { SCEnter(); DecodeThreadVars *dtv = NULL; diff --git a/src/source-erf-file.c b/src/source-erf-file.c index fcbc304d369b..3c2d220d9b6d 100644 --- a/src/source-erf-file.c +++ b/src/source-erf-file.c @@ -195,17 +195,12 @@ static inline TmEcode ReadErfRecord(ThreadVars *tv, Packet *p, void *data) GET_PKT_LEN(p) = wlen; p->datalink = LINKTYPE_ETHERNET; - /* Convert ERF time to timeval - from libpcap. */ + /* Convert ERF time to SCTime_t */ uint64_t ts = dr.ts; p->ts = SCTIME_FROM_SECS(ts >> 32); ts = (ts & 0xffffffffULL) * 1000000; ts += 0x80000000; /* rounding */ uint64_t usecs = (ts >> 32); - if (usecs >= 1000000) { - usecs -= 1000000; - p->ts = SCTIME_ADD_SECS(p->ts, 1); - usecs++; - } p->ts = SCTIME_ADD_USECS(p->ts, usecs); etv->pkts++; diff --git a/src/source-ipfw.c b/src/source-ipfw.c index 75bd738fee02..7f31de71a84a 100644 --- a/src/source-ipfw.c +++ b/src/source-ipfw.c @@ -412,8 +412,7 @@ TmEcode ReceiveIPFWThreadDeinit(ThreadVars *tv, void *data) SCEnter(); - /* Attempt to shut the socket down...close instead? 
*/ - if (shutdown(nq->fd, SHUT_RD) < 0) { + if (close(nq->fd) < 0) { SCLogWarning("Unable to disable ipfw socket: %s", strerror(errno)); SCReturnInt(TM_ECODE_FAILED); } diff --git a/src/source-napatech.c b/src/source-napatech.c index 5d5e1f674d15..bf4dc8d49ff1 100644 --- a/src/source-napatech.c +++ b/src/source-napatech.c @@ -946,19 +946,19 @@ TmEcode NapatechPacketLoop(ThreadVars *tv, void *data, void *slot) */ switch (NT_NET_GET_PKT_TIMESTAMP_TYPE(packet_buffer)) { case NT_TIMESTAMP_TYPE_NATIVE_UNIX: - p->ts = SCTIME_ADD_USECS(SCTIME_FROM_USECS(pkt_ts / 100000000), + p->ts = SCTIME_ADD_USECS(SCTIME_FROM_SECS(pkt_ts / 100000000), ((pkt_ts % 100000000) / 100) + ((pkt_ts % 100) > 50 ? 1 : 0)); break; case NT_TIMESTAMP_TYPE_PCAP: - p->ts = SCTIME_ADD_USECS(SCTIME_FROM_USECS(pkt_ts >> 32), pkt_ts & 0xFFFFFFFF); + p->ts = SCTIME_ADD_USECS(SCTIME_FROM_SECS(pkt_ts >> 32), pkt_ts & 0xFFFFFFFF); break; case NT_TIMESTAMP_TYPE_PCAP_NANOTIME: - p->ts = SCTIME_ADD_USECS(SCTIME_FROM_USECS(pkt_ts >> 32), + p->ts = SCTIME_ADD_USECS(SCTIME_FROM_SECS(pkt_ts >> 32), ((pkt_ts & 0xFFFFFFFF) / 1000) + ((pkt_ts % 1000) > 500 ? 1 : 0)); break; case NT_TIMESTAMP_TYPE_NATIVE_NDIS: /* number of seconds between 1/1/1601 and 1/1/1970 */ - p->ts = SCTIME_ADD_USECS(SCTIME_FROM_USECS((pkt_ts / 100000000) - 11644473600), + p->ts = SCTIME_ADD_USECS(SCTIME_FROM_SECS((pkt_ts / 100000000) - 11644473600), ((pkt_ts % 100000000) / 100) + ((pkt_ts % 100) > 50 ? 
1 : 0)); break; default: diff --git a/src/source-netmap.c b/src/source-netmap.c index 0b04b41b52d6..8e409ea00e6f 100644 --- a/src/source-netmap.c +++ b/src/source-netmap.c @@ -453,6 +453,7 @@ static int NetmapOpen(NetmapIfaceSettings *ns, NetmapDevice **pdevice, int verbo } } + SCMutexUnlock(&netmap_devlist_lock); NetmapCloseAll(); FatalError("opening devname %s failed: %s", devname, strerror(errno)); } diff --git a/src/source-pcap-file-helper.c b/src/source-pcap-file-helper.c index 936b65fb3d9f..4984a44bff76 100644 --- a/src/source-pcap-file-helper.c +++ b/src/source-pcap-file-helper.c @@ -251,6 +251,7 @@ TmEcode ValidateLinkType(int datalink, DecoderFunc *DecoderFn) *DecoderFn = DecodePPP; break; case LINKTYPE_IPV4: + case LINKTYPE_IPV6: case LINKTYPE_RAW: case LINKTYPE_RAW2: case LINKTYPE_GRE_OVER_IP: diff --git a/src/source-pfring.c b/src/source-pfring.c index 96da94eff533..10eac2fe57c7 100644 --- a/src/source-pfring.c +++ b/src/source-pfring.c @@ -430,6 +430,7 @@ TmEcode ReceivePfringLoop(ThreadVars *tv, void *data, void *slot) } } else if (unlikely(r == 0)) { if (suricata_ctl_flags & SURICATA_STOP) { + TmqhOutputPacketpool(ptv->tv, p); SCReturnInt(TM_ECODE_OK); } @@ -701,6 +702,7 @@ TmEcode ReceivePfringThreadDeinit(ThreadVars *tv, void *data) } pfring_close(ptv->pd); + SCFree(ptv); return TM_ECODE_OK; } diff --git a/src/suricata.c b/src/suricata.c index 7f979a7fbfcc..5c8c7cfafeed 100644 --- a/src/suricata.c +++ b/src/suricata.c @@ -127,6 +127,7 @@ #include "util-ebpf.h" #include "util-exception-policy.h" #include "util-host-os-info.h" +#include "util-hugepages.h" #include "util-ioctl.h" #include "util-landlock.h" #include "util-luajit.h" @@ -742,6 +743,12 @@ static void PrintBuildInfo(void) #ifdef HAVE_LUA strlcat(features, "HAVE_LUA ", sizeof(features)); #endif +#ifdef HAVE_JA3 + strlcat(features, "HAVE_JA3 ", sizeof(features)); +#endif +#ifdef HAVE_JA4 + strlcat(features, "HAVE_JA4 ", sizeof(features)); +#endif #ifdef HAVE_LUAJIT strlcat(features, 
"HAVE_LUAJIT ", sizeof(features)); #endif @@ -766,6 +773,9 @@ static void PrintBuildInfo(void) strlcat(features, "RUST ", sizeof(features)); #if defined(SC_ADDRESS_SANITIZER) strlcat(features, "ASAN ", sizeof(features)); +#endif +#if defined(HAVE_POPCNT64) + strlcat(features, "POPCNT64 ", sizeof(features)); #endif if (strlen(features) == 0) { strlcat(features, "none", sizeof(features)); @@ -783,6 +793,9 @@ static void PrintBuildInfo(void) #endif #if defined(__SSE3__) strlcat(features, "SSE_3 ", sizeof(features)); +#endif +#if defined(__SSE2__) + strlcat(features, "SSE_2 ", sizeof(features)); #endif if (strlen(features) == 0) { strlcat(features, "none", sizeof(features)); @@ -880,9 +893,6 @@ int g_ut_covered; void RegisterAllModules(void) { - // zero all module storage - memset(tmm_modules, 0, TMM_SIZE * sizeof(TmModule)); - /* commanders */ TmModuleUnixManagerRegister(); /* managers */ @@ -2671,6 +2681,10 @@ int PostConfLoadedSetup(SCInstance *suri) MacSetRegisterFlowStorage(); +#ifdef HAVE_PLUGINS + SCPluginsLoad(suri->capture_plugin_name, suri->capture_plugin_args); +#endif + LiveDeviceFinalize(); // must be after EBPF extension registration RunModeEngineIsIPS( @@ -2683,6 +2697,10 @@ int PostConfLoadedSetup(SCInstance *suri) SetMasterExceptionPolicy(); + /* Must occur prior to output mod registration + and app layer setup. */ + FeatureTrackingRegister(); + AppLayerSetup(); /* Suricata will use this umask if provided. 
By default it will use the @@ -2740,11 +2758,7 @@ int PostConfLoadedSetup(SCInstance *suri) SCReturnInt(TM_ECODE_FAILED); } - FeatureTrackingRegister(); /* must occur prior to output mod registration */ RegisterAllModules(); -#ifdef HAVE_PLUGINS - SCPluginsLoad(suri->capture_plugin_name, suri->capture_plugin_args); -#endif AppLayerHtpNeedFileInspection(); StorageFinalize(); @@ -2868,6 +2882,10 @@ int InitGlobal(void) ConfInit(); VarNameStoreInit(); + + // zero all module storage + memset(tmm_modules, 0, TMM_SIZE * sizeof(TmModule)); + return 0; } @@ -2970,6 +2988,10 @@ int SuricataMain(int argc, char **argv) goto out; } + SystemHugepageSnapshot *prerun_snap = NULL; + if (run_mode == RUNMODE_DPDK) + prerun_snap = SystemHugepageSnapshotCreate(); + SCSetStartTime(&suricata); RunModeDispatch(suricata.run_mode, suricata.runmode_custom_mode, suricata.capture_plugin_name, suricata.capture_plugin_args); @@ -3027,9 +3049,12 @@ int SuricataMain(int argc, char **argv) OnNotifyRunning(); PostRunStartedDetectSetup(&suricata); - - DPDKEvaluateHugepages(); - + if (run_mode == RUNMODE_DPDK) { // only DPDK uses hpages at the moment + SystemHugepageSnapshot *postrun_snap = SystemHugepageSnapshotCreate(); + SystemHugepageEvaluateHugepages(prerun_snap, postrun_snap); + SystemHugepageSnapshotDestroy(prerun_snap); + SystemHugepageSnapshotDestroy(postrun_snap); + } SCPledge(); SuricataMainLoop(&suricata); diff --git a/src/tests/detect-bsize.c b/src/tests/detect-bsize.c index 2fcd65658990..f0b13a8944a6 100644 --- a/src/tests/detect-bsize.c +++ b/src/tests/detect-bsize.c @@ -19,7 +19,7 @@ #define TEST_OK(str, m, lo, hi) \ { \ - DetectU64Data *bsz = DetectBsizeParse((str)); \ + DetectU64Data *bsz = DetectU64Parse((str)); \ FAIL_IF_NULL(bsz); \ FAIL_IF_NOT(bsz->mode == (m)); \ DetectBsizeFree(NULL, bsz); \ @@ -27,7 +27,7 @@ } #define TEST_FAIL(str) \ { \ - DetectU64Data *bsz = DetectBsizeParse((str)); \ + DetectU64Data *bsz = DetectU64Parse((str)); \ FAIL_IF_NOT_NULL(bsz); \ } diff --git 
a/src/tests/detect-engine-content-inspection.c b/src/tests/detect-engine-content-inspection.c index ee1b605f2c0d..8539d465de4b 100644 --- a/src/tests/detect-engine-content-inspection.c +++ b/src/tests/detect-engine-content-inspection.c @@ -273,6 +273,17 @@ static int DetectEngineContentInspectionTest13(void) { TEST_FOOTER; } +/** \brief negative distance */ +static int DetectEngineContentInspectionTest17(void) +{ + TEST_HEADER; + TEST_RUN("aaabbbcccdddee", 14, + "content:\"aaa\"; content:\"ee\"; within:2; distance:9; content:\"bbb\"; within:3; " + "distance:-11; content:\"ccc\"; within:3; distance:0;", + true, 4); + TEST_FOOTER; +} + void DetectEngineContentInspectionRegisterTests(void) { UtRegisterTest("DetectEngineContentInspectionTest01", @@ -301,6 +312,8 @@ void DetectEngineContentInspectionRegisterTests(void) DetectEngineContentInspectionTest12); UtRegisterTest("DetectEngineContentInspectionTest13 mix startswith/endswith", DetectEngineContentInspectionTest13); + UtRegisterTest("DetectEngineContentInspectionTest17 negative distance", + DetectEngineContentInspectionTest17); } #undef TEST_HEADER diff --git a/src/tests/detect-http-client-body.c b/src/tests/detect-http-client-body.c index c87d66756b9f..bbeb4d33bde8 100644 --- a/src/tests/detect-http-client-body.c +++ b/src/tests/detect-http-client-body.c @@ -157,6 +157,7 @@ static int RunTest (struct TestSteps *steps, const char *sig, const char *yaml) int i = 0; while (b->input != NULL) { SCLogDebug("chunk %p %d", b, i); + (void)i; Packet *p = UTHBuildPacket(NULL, 0, IPPROTO_TCP); FAIL_IF_NULL(p); p->flow = &f; diff --git a/src/tests/detect-http-server-body.c b/src/tests/detect-http-server-body.c index 29340fb4aa77..d9723d4a9df3 100644 --- a/src/tests/detect-http-server-body.c +++ b/src/tests/detect-http-server-body.c @@ -119,6 +119,7 @@ static int RunTest(struct TestSteps *steps, const char *sig, const char *yaml) int i = 0; while (b->input != NULL) { SCLogDebug("chunk %p %d", b, i); + (void)i; Packet *p = 
UTHBuildPacket(NULL, 0, IPPROTO_TCP); FAIL_IF_NULL(p); p->flow = &f; diff --git a/src/tests/fuzz/confyaml.c b/src/tests/fuzz/confyaml.c index d36e8e31339e..e67e40a34727 100644 --- a/src/tests/fuzz/confyaml.c +++ b/src/tests/fuzz/confyaml.c @@ -1,4 +1,4 @@ -const char configNoChecksum[] = "\ +const char *configNoChecksum = "\ %YAML 1.1\n\ ---\n\ pcap-file:\n\ diff --git a/src/tests/fuzz/fuzz_applayerparserparse.c b/src/tests/fuzz/fuzz_applayerparserparse.c index 0ee263fc0b0c..820f32374464 100644 --- a/src/tests/fuzz/fuzz_applayerparserparse.c +++ b/src/tests/fuzz/fuzz_applayerparserparse.c @@ -21,7 +21,7 @@ int LLVMFuzzerInitialize(int *argc, char ***argv); AppLayerParserThreadCtx *alp_tctx = NULL; -#include "confyaml.c" +extern const char *configNoChecksum; /* input buffer is structured this way : * 6 bytes header, diff --git a/src/tests/fuzz/fuzz_applayerprotodetectgetproto.c b/src/tests/fuzz/fuzz_applayerprotodetectgetproto.c index 598e7cc03ff6..34ec11e16fe5 100644 --- a/src/tests/fuzz/fuzz_applayerprotodetectgetproto.c +++ b/src/tests/fuzz/fuzz_applayerprotodetectgetproto.c @@ -18,7 +18,7 @@ //rule of thumb constant, so as not to timeout target #define PROTO_DETECT_MAX_LEN 1024 -#include "confyaml.c" +extern const char *configNoChecksum; int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size); diff --git a/src/tests/fuzz/fuzz_predefpcap_aware.c b/src/tests/fuzz/fuzz_predefpcap_aware.c index c20e3d341d52..1b230021f342 100644 --- a/src/tests/fuzz/fuzz_predefpcap_aware.c +++ b/src/tests/fuzz/fuzz_predefpcap_aware.c @@ -42,7 +42,7 @@ void *fwd; SCInstance surifuzz; SC_ATOMIC_EXTERN(unsigned int, engine_stage); -#include "confyaml.c" +extern const char *configNoChecksum; char *filepath = NULL; diff --git a/src/tests/fuzz/fuzz_sigpcap.c b/src/tests/fuzz/fuzz_sigpcap.c index e5bd56deb476..216aa926deb0 100644 --- a/src/tests/fuzz/fuzz_sigpcap.c +++ b/src/tests/fuzz/fuzz_sigpcap.c @@ -42,7 +42,7 @@ void *fwd; SCInstance surifuzz; SC_ATOMIC_EXTERN(unsigned int, 
engine_stage); -#include "confyaml.c" +extern const char *configNoChecksum; int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { diff --git a/src/tests/fuzz/fuzz_sigpcap_aware.c b/src/tests/fuzz/fuzz_sigpcap_aware.c index d2454769859b..ea34925f78de 100644 --- a/src/tests/fuzz/fuzz_sigpcap_aware.c +++ b/src/tests/fuzz/fuzz_sigpcap_aware.c @@ -42,7 +42,7 @@ void *fwd; SCInstance surifuzz; SC_ATOMIC_EXTERN(unsigned int, engine_stage); -#include "confyaml.c" +extern const char *configNoChecksum; static void SigGenerateAware(const uint8_t *data, size_t size, char *r, size_t *len) { diff --git a/src/tests/output-json-stats.c b/src/tests/output-json-stats.c new file mode 100644 index 000000000000..332a819fee86 --- /dev/null +++ b/src/tests/output-json-stats.c @@ -0,0 +1,135 @@ +/* Copyright (C) 2024 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +#include "../suricata-common.h" + +#include "../output-json-stats.h" + +#include "../util-unittest.h" + +static int OutputJsonStatsTest01(void) +{ + StatsRecord total_records[] = { { 0 }, { 0 } }; + StatsRecord thread_records[2]; + thread_records[0].name = "capture.kernel_packets"; + thread_records[0].short_name = "kernel_packets"; + thread_records[0].tm_name = "W#01-bond0.30"; + thread_records[0].value = 42; + thread_records[1].name = "capture.kernel_drops"; + thread_records[1].short_name = "kernel_drops"; + thread_records[1].tm_name = "W#01-bond0.30"; + thread_records[1].value = 4711; + + StatsTable table = { + .nstats = 2, + .stats = &total_records[0], + .ntstats = 1, + .tstats = &thread_records[0], + }; + + json_t *r = StatsToJSON(&table, JSON_STATS_TOTALS | JSON_STATS_THREADS); + if (!r) + return 0; + + // Remove variable content + json_object_del(r, "uptime"); + + char *serialized = json_dumps(r, 0); + + // Cheesy comparison + const char *expected = "{\"threads\": {\"W#01-bond0.30\": {\"capture\": {\"kernel_packets\": " + "42, \"kernel_drops\": 4711}}}}"; + + int cmp_result = strcmp(expected, serialized); + if (cmp_result != 0) + printf("unexpected result\nexpected=%s\ngot=%s\n", expected, serialized); + + free(serialized); + json_decref(r); + + return cmp_result == 0; +} + +static int OutputJsonStatsTest02(void) +{ + StatsRecord total_records[4] = { 0 }; + StatsRecord thread_records[8] = { 0 }; + + // Totals + total_records[0].name = "tcp.syn"; + total_records[0].short_name = "syn"; + total_records[0].tm_name = NULL; + total_records[0].value = 1234; + + // Worker + // thread_records[0] is a global counter + thread_records[1].name = "capture.kernel_packets"; + thread_records[1].short_name = "kernel_packets"; + thread_records[1].tm_name = "W#01-bond0.30"; + thread_records[1].value = 42; + thread_records[2].name = "capture.kernel_drops"; + thread_records[2].short_name = "kernel_drops"; + thread_records[2].tm_name = "W#01-bond0.30"; + 
thread_records[2].value = 4711; + // thread_records[3] is a FM specific counter + + // Flow manager + // thread_records[4] is a global counter + // thread_records[5] is a worker specific counter + // thread_records[6] is a worker specific counter + thread_records[7].name = "flow.mgr.full_hash_passes"; + thread_records[7].short_name = "full_hash_passes"; + thread_records[7].tm_name = "FM#01"; + thread_records[7].value = 10; + + StatsTable table = { + .nstats = 4, + .stats = &total_records[0], + .ntstats = 2, + .tstats = &thread_records[0], + }; + + json_t *r = StatsToJSON(&table, JSON_STATS_TOTALS | JSON_STATS_THREADS); + if (!r) + return 0; + + // Remove variable content + json_object_del(r, "uptime"); + + char *serialized = json_dumps(r, 0); + + // Cheesy comparison + const char *expected = "{\"tcp\": {\"syn\": 1234}, \"threads\": {\"W#01-bond0.30\": " + "{\"capture\": {\"kernel_packets\": " + "42, \"kernel_drops\": 4711}}, \"FM#01\": {\"flow\": {\"mgr\": " + "{\"full_hash_passes\": 10}}}}}"; + + int cmp_result = strcmp(expected, serialized); + if (cmp_result != 0) + printf("unexpected result\nexpected=%s\ngot=%s\n", expected, serialized); + + free(serialized); + json_decref(r); + + return cmp_result == 0; +} + +void OutputJsonStatsRegisterTests(void) +{ + UtRegisterTest("OutputJsonStatsTest01", OutputJsonStatsTest01); + UtRegisterTest("OutputJsonStatsTest02", OutputJsonStatsTest02); +} diff --git a/src/tm-threads.c b/src/tm-threads.c index b173cb84f442..f8eb88a7b485 100644 --- a/src/tm-threads.c +++ b/src/tm-threads.c @@ -1241,13 +1241,17 @@ static int TmThreadKillThread(ThreadVars *tv) } if (tv->inq != NULL) { for (int i = 0; i < (tv->inq->reader_cnt + tv->inq->writer_cnt); i++) { + SCMutexLock(&tv->inq->pq->mutex_q); SCCondSignal(&tv->inq->pq->cond_q); + SCMutexUnlock(&tv->inq->pq->mutex_q); } SCLogDebug("signalled tv->inq->id %" PRIu32 "", tv->inq->id); } if (tv->ctrl_cond != NULL ) { + SCCtrlMutexLock(tv->ctrl_mutex); pthread_cond_broadcast(tv->ctrl_cond); + 
SCCtrlMutexUnlock(tv->ctrl_mutex); } return 0; } @@ -1427,7 +1431,9 @@ void TmThreadDisableReceiveThreads(void) if (tv->inq != NULL) { for (int i = 0; i < (tv->inq->reader_cnt + tv->inq->writer_cnt); i++) { + SCMutexLock(&tv->inq->pq->mutex_q); SCCondSignal(&tv->inq->pq->cond_q); + SCMutexUnlock(&tv->inq->pq->mutex_q); } SCLogDebug("signalled tv->inq->id %" PRIu32 "", tv->inq->id); } @@ -1507,7 +1513,9 @@ void TmThreadDisablePacketThreads(void) * THV_KILL flag. */ if (tv->inq != NULL) { for (int i = 0; i < (tv->inq->reader_cnt + tv->inq->writer_cnt); i++) { + SCMutexLock(&tv->inq->pq->mutex_q); SCCondSignal(&tv->inq->pq->cond_q); + SCMutexUnlock(&tv->inq->pq->mutex_q); } SCLogDebug("signalled tv->inq->id %" PRIu32 "", tv->inq->id); } @@ -1791,6 +1799,63 @@ void TmThreadContinue(ThreadVars *tv) return; } +static TmEcode WaitOnThreadsRunningByType(const int t) +{ + struct timeval start_ts; + struct timeval cur_ts; + uint32_t thread_cnt = 0; + + /* on retries, this will init to the last thread that started up already */ + ThreadVars *tv_start = tv_root[t]; + SCMutexLock(&tv_root_lock); + for (ThreadVars *tv = tv_start; tv != NULL; tv = tv->next) { + thread_cnt++; + } + SCMutexUnlock(&tv_root_lock); + + /* give threads a second each to start up, plus a margin of a minute. 
*/ + uint32_t time_budget = 60 + thread_cnt; + + gettimeofday(&start_ts, NULL); +again: + SCMutexLock(&tv_root_lock); + ThreadVars *tv = tv_start; + while (tv != NULL) { + if (TmThreadsCheckFlag(tv, (THV_FAILED | THV_CLOSED | THV_DEAD))) { + SCMutexUnlock(&tv_root_lock); + + SCLogError("thread \"%s\" failed to " + "start: flags %04x", + tv->name, SC_ATOMIC_GET(tv->flags)); + return TM_ECODE_FAILED; + } + + if (!(TmThreadsCheckFlag(tv, THV_RUNNING | THV_RUNNING_DONE))) { + SCMutexUnlock(&tv_root_lock); + + /* 60 seconds provided for the thread to transition from + * THV_INIT_DONE to THV_RUNNING */ + gettimeofday(&cur_ts, NULL); + if (((uint32_t)cur_ts.tv_sec - (uint32_t)start_ts.tv_sec) > time_budget) { + SCLogError("thread \"%s\" failed to " + "start in time: flags %04x. Total threads: %u. Time budget %us", + tv->name, SC_ATOMIC_GET(tv->flags), thread_cnt, time_budget); + return TM_ECODE_FAILED; + } + + /* sleep a little to give the thread some + * time to start running */ + SleepUsec(100); + goto again; + } + tv_start = tv; + + tv = tv->next; + } + SCMutexUnlock(&tv_root_lock); + return TM_ECODE_OK; +} + /** * \brief Waits for all threads to be in a running state * @@ -1804,45 +1869,12 @@ TmEcode TmThreadWaitOnThreadRunning(void) uint16_t FR_num = 0; uint16_t TX_num = 0; - struct timeval start_ts; - struct timeval cur_ts; - gettimeofday(&start_ts, NULL); - -again: - SCMutexLock(&tv_root_lock); for (int i = 0; i < TVT_MAX; i++) { - ThreadVars *tv = tv_root[i]; - while (tv != NULL) { - if (TmThreadsCheckFlag(tv, (THV_FAILED | THV_CLOSED | THV_DEAD))) { - SCMutexUnlock(&tv_root_lock); - - SCLogError("thread \"%s\" failed to " - "start: flags %04x", - tv->name, SC_ATOMIC_GET(tv->flags)); - return TM_ECODE_FAILED; - } - - if (!(TmThreadsCheckFlag(tv, THV_RUNNING | THV_RUNNING_DONE))) { - SCMutexUnlock(&tv_root_lock); - - /* 60 seconds provided for the thread to transition from - * THV_INIT_DONE to THV_RUNNING */ - gettimeofday(&cur_ts, NULL); - if ((cur_ts.tv_sec - 
start_ts.tv_sec) > 60) { - SCLogError("thread \"%s\" failed to " - "start in time: flags %04x", - tv->name, SC_ATOMIC_GET(tv->flags)); - return TM_ECODE_FAILED; - } - - /* sleep a little to give the thread some - * time to start running */ - SleepUsec(100); - goto again; - } - tv = tv->next; - } + if (WaitOnThreadsRunningByType(i) != TM_ECODE_OK) + return TM_ECODE_FAILED; } + + SCMutexLock(&tv_root_lock); for (int i = 0; i < TVT_MAX; i++) { for (ThreadVars *tv = tv_root[i]; tv != NULL; tv = tv->next) { if (strncmp(thread_name_autofp, tv->name, strlen(thread_name_autofp)) == 0) @@ -2205,6 +2237,8 @@ bool TmThreadsTimeSubsysIsReady(void) Thread *t = &thread_store.threads[s]; if (!t->in_use) break; + if (t->type != TVT_PPT) + continue; if (t->sys_sec_stamp == 0) { ready = false; break; @@ -2223,6 +2257,8 @@ void TmThreadsInitThreadsTimestamp(const SCTime_t ts) Thread *t = &thread_store.threads[s]; if (!t->in_use) break; + if (t->type != TVT_PPT) + continue; t->pktts = ts; t->sys_sec_stamp = (uint32_t)systs.tv_sec; } @@ -2243,6 +2279,9 @@ void TmThreadsGetMinimalTimestamp(struct timeval *ts) Thread *t = &thread_store.threads[s]; if (t->in_use == 0) break; + /* only packet threads set timestamps based on packets */ + if (t->type != TVT_PPT) + continue; struct timeval pkttv = { .tv_sec = SCTIME_SECS(t->pktts), .tv_usec = SCTIME_USECS(t->pktts) }; if (!(timercmp(&pkttv, &nullts, ==))) { @@ -2298,7 +2337,9 @@ void TmThreadsInjectFlowById(Flow *f, const int id) /* wake up listening thread(s) if necessary */ if (tv->inq != NULL) { + SCMutexLock(&tv->inq->pq->mutex_q); SCCondSignal(&tv->inq->pq->cond_q); + SCMutexUnlock(&tv->inq->pq->mutex_q); } else if (tv->break_loop) { TmThreadsCaptureBreakLoop(tv); } diff --git a/src/tmqh-simple.c b/src/tmqh-simple.c index 47faed5702c5..0bfa173e5009 100644 --- a/src/tmqh-simple.c +++ b/src/tmqh-simple.c @@ -76,8 +76,11 @@ void TmqhInputSimpleShutdownHandler(ThreadVars *tv) return; } - for (i = 0; i < (tv->inq->reader_cnt + 
tv->inq->writer_cnt); i++) + for (i = 0; i < (tv->inq->reader_cnt + tv->inq->writer_cnt); i++) { + SCMutexLock(&tv->inq->pq->mutex_q); SCCondSignal(&tv->inq->pq->cond_q); + SCMutexUnlock(&tv->inq->pq->mutex_q); + } } void TmqhOutputSimple(ThreadVars *t, Packet *p) diff --git a/src/util-action.c b/src/util-action.c index 1b24bc5b0792..d3ea1b631a17 100644 --- a/src/util-action.c +++ b/src/util-action.c @@ -53,15 +53,22 @@ uint8_t action_order_sigs[4] = {ACTION_PASS, ACTION_DROP, ACTION_REJECT, ACTION_ uint8_t ActionOrderVal(uint8_t action) { /* reject_both and reject_dst have the same prio as reject */ - if( (action & ACTION_REJECT) || - (action & ACTION_REJECT_BOTH) || - (action & ACTION_REJECT_DST)) { + if (action & ACTION_REJECT_ANY) { action = ACTION_REJECT; + } else if (action & ACTION_DROP) { + action = ACTION_DROP; + } else if (action & ACTION_PASS) { + action = ACTION_PASS; + } else if (action & ACTION_ALERT) { + action = ACTION_ALERT; + } else if (action == 0) { + action = ACTION_ALERT; } - uint8_t i = 0; - for (; i < 4; i++) { - if (action_order_sigs[i] == action) + + for (uint8_t i = 0; i < 4; i++) { + if (action_order_sigs[i] == action) { return i; + } } /* Unknown action, set just a low prio (high val) */ return 10; diff --git a/src/util-base64.c b/src/util-base64.c index 4a4a5d122c41..d973f0e86d13 100644 --- a/src/util-base64.c +++ b/src/util-base64.c @@ -156,6 +156,8 @@ Base64Ecode DecodeBase64(uint8_t *dest, uint32_t dest_size, const uint8_t *src, ecode = BASE64_ECODE_BUF; break; } + if (dest_size - *decoded_bytes < ASCII_BLOCK) + return BASE64_ECODE_BUF; /* Decode base-64 block into ascii block and move pointer */ DecodeBase64Block(dptr, b64); @@ -183,7 +185,7 @@ Base64Ecode DecodeBase64(uint8_t *dest, uint32_t dest_size, const uint8_t *src, /* if the destination size is not at least 3 Bytes long, it'll give a dynamic * buffer overflow while decoding, so, return and let the caller take care of the * remaining bytes to be decoded which should always 
be < 4 at this stage */ - if (dest_size - *decoded_bytes < 3) + if (dest_size - *decoded_bytes < ASCII_BLOCK) return BASE64_ECODE_BUF; *decoded_bytes += numDecoded_blk; DecodeBase64Block(dptr, b64); @@ -193,6 +195,8 @@ Base64Ecode DecodeBase64(uint8_t *dest, uint32_t dest_size, const uint8_t *src, /* Finish remaining b64 bytes by padding */ if (valid && bbidx > 0 && (mode != BASE64_MODE_RFC2045)) { /* Decode remaining */ + if (dest_size - *decoded_bytes < ASCII_BLOCK) + return BASE64_ECODE_BUF; *decoded_bytes += ASCII_BLOCK - (B64_BLOCK - bbidx); DecodeBase64Block(dptr, b64); } diff --git a/src/util-decode-mime.c b/src/util-decode-mime.c index 5e7a8d5713f4..eb67c3d52d9f 100644 --- a/src/util-decode-mime.c +++ b/src/util-decode-mime.c @@ -2439,6 +2439,7 @@ MimeDecParseState * MimeDecInitParser(void *data, PushStack(state->stack); if (state->stack->top == NULL) { SCFree(state->stack); + SCFree(state->msg); SCFree(state); return NULL; } diff --git a/src/util-device.c b/src/util-device.c index b624cf07342b..82f77a8ce23e 100644 --- a/src/util-device.c +++ b/src/util-device.c @@ -584,8 +584,11 @@ void LiveDevAddBypassSuccess(LiveDevice *dev, uint64_t cnt, int family) #ifdef BUILD_UNIX_SOCKET TmEcode LiveDeviceGetBypassedStats(json_t *cmd, json_t *answer, void *data) { + if (g_bypass_storage_id.id < 0) { + json_object_set_new(answer, "message", json_string("Bypass not enabled")); + SCReturnInt(TM_ECODE_FAILED); + } LiveDevice *ldev = NULL, *ndev = NULL; - json_t *ifaces = NULL; while(LiveDeviceForEach(&ldev, &ndev)) { BypassInfo *bpinfo = LiveDevGetStorageById(ldev, g_bypass_storage_id); diff --git a/src/util-dpdk-ice.c b/src/util-dpdk-ice.c index 36f4481dea2f..4b714d88c353 100644 --- a/src/util-dpdk-ice.c +++ b/src/util-dpdk-ice.c @@ -35,7 +35,7 @@ #ifdef HAVE_DPDK -void iceDeviceSetRSSHashFunction(uint64_t *rss_hf) +static void iceDeviceSetRSSHashFunction(uint64_t *rss_hf) { #if RTE_VERSION < RTE_VERSION_NUM(20, 0, 0, 0) *rss_hf = RTE_ETH_RSS_FRAG_IPV4 | 
RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_FRAG_IPV6 | @@ -46,6 +46,16 @@ void iceDeviceSetRSSHashFunction(uint64_t *rss_hf) #endif } +void iceDeviceSetRSSConf(struct rte_eth_rss_conf *rss_conf) +{ + iceDeviceSetRSSHashFunction(&rss_conf->rss_hf); +#if RTE_VERSION < RTE_VERSION_NUM(23, 11, 0, 0) + rss_conf->rss_key_len = 40; +#else + rss_conf->rss_key_len = 52; +#endif +} + #endif /* HAVE_DPDK */ /** * @} diff --git a/src/util-dpdk-ice.h b/src/util-dpdk-ice.h index cdc2185cca12..d535fa092f9f 100644 --- a/src/util-dpdk-ice.h +++ b/src/util-dpdk-ice.h @@ -28,7 +28,9 @@ #ifdef HAVE_DPDK -void iceDeviceSetRSSHashFunction(uint64_t *rss_conf); +#include "util-dpdk.h" + +void iceDeviceSetRSSConf(struct rte_eth_rss_conf *rss_conf); #endif /* HAVE_DPDK */ diff --git a/src/util-dpdk.c b/src/util-dpdk.c index 089aa45674ae..13329a81d13a 100644 --- a/src/util-dpdk.c +++ b/src/util-dpdk.c @@ -66,106 +66,7 @@ void DPDKFreeDevice(LiveDevice *ldev) #endif } -static FILE *HugepagesMeminfoOpen(void) -{ - FILE *fp = fopen("/proc/meminfo", "r"); - if (fp == NULL) { - SCLogInfo("Can't analyze hugepage usage: failed to open /proc/meminfo"); - } - return fp; -} - -static void HugepagesMeminfoClose(FILE *fp) -{ - if (fp) { - fclose(fp); - } -} - -/** - * Parsing values of meminfo - * - * \param fp Opened file pointer for reading of file /proc/meminfo at beginning - * \param keyword Entry to look for e.g. 
"HugePages_Free:" - * \return n Value of the entry - * \return -1 On error - * - */ -static int32_t MemInfoParseValue(FILE *fp, const char *keyword) -{ - char path[256], value_str[64]; - int32_t value = -1; - - while (fscanf(fp, "%255s", path) != EOF) { - if (strcmp(path, keyword) == 0) { - if (fscanf(fp, "%63s", value_str) == EOF) { - SCLogDebug("%s: not followed by any number", keyword); - break; - } - - if (StringParseInt32(&value, 10, 23, value_str) < 0) { - SCLogDebug("Failed to convert %s from /proc/meminfo", keyword); - value = -1; - } - break; - } - } - return value; -} - -static void MemInfoEvaluateHugepages(FILE *fp) -{ - int32_t free_hugepages = MemInfoParseValue(fp, "HugePages_Free:"); - if (free_hugepages < 0) { - SCLogInfo("HugePages_Free information not found in /proc/meminfo"); - return; - } - - rewind(fp); - - int32_t total_hugepages = MemInfoParseValue(fp, "HugePages_Total:"); - if (total_hugepages < 0) { - SCLogInfo("HugePages_Total information not found in /proc/meminfo"); - return; - } else if (total_hugepages == 0) { - SCLogInfo("HugePages_Total equals to zero"); - return; - } - - float free_hugepages_ratio = (float)free_hugepages / (float)total_hugepages; - if (free_hugepages_ratio > 0.5) { - SCLogInfo("%" PRIu32 " of %" PRIu32 - " of hugepages are free - number of hugepages can be lowered to e.g. 
%.0lf", - free_hugepages, total_hugepages, ceil((total_hugepages - free_hugepages) * 1.15)); - } -} - -static void MemInfoWith(void (*callback)(FILE *)) -{ - FILE *fp = HugepagesMeminfoOpen(); - if (fp) { - callback(fp); - HugepagesMeminfoClose(fp); - } -} - -void DPDKEvaluateHugepages(void) -{ - if (run_mode != RUNMODE_DPDK) - return; - -#ifdef HAVE_DPDK - if (rte_eal_has_hugepages() == 0) { // hugepages disabled - SCLogPerf("Hugepages not enabled - enabling hugepages can improve performance"); - return; - } -#endif - - MemInfoWith(MemInfoEvaluateHugepages); -} - #ifdef HAVE_DPDK - /** * Retrieves name of the port from port id * Not thread-safe diff --git a/src/util-dpdk.h b/src/util-dpdk.h index a94f46225217..1fb3532f5d4d 100644 --- a/src/util-dpdk.h +++ b/src/util-dpdk.h @@ -121,7 +121,6 @@ void DPDKCleanupEAL(void); void DPDKCloseDevice(LiveDevice *ldev); void DPDKFreeDevice(LiveDevice *ldev); -void DPDKEvaluateHugepages(void); #ifdef HAVE_DPDK const char *DPDKGetPortNameByPortID(uint16_t pid); diff --git a/src/util-error.c b/src/util-error.c index 01c2f9a01b73..e3195a13a355 100644 --- a/src/util-error.c +++ b/src/util-error.c @@ -47,6 +47,7 @@ const char * SCErrorToString(SCError err) CASE_CODE(SC_EINVAL); CASE_CODE(SC_ELIMIT); CASE_CODE(SC_EEXIST); + CASE_CODE(SC_ENOENT); CASE_CODE (SC_ERR_MAX); } diff --git a/src/util-error.h b/src/util-error.h index eaaf8cb34a2a..f1bc80dd7c3c 100644 --- a/src/util-error.h +++ b/src/util-error.h @@ -30,6 +30,7 @@ typedef enum { SC_EINVAL, SC_ELIMIT, SC_EEXIST, + SC_ENOENT, SC_ERR_MAX } SCError; diff --git a/src/util-file.c b/src/util-file.c index 0449a2edae6c..89ef50c1d4eb 100644 --- a/src/util-file.c +++ b/src/util-file.c @@ -235,8 +235,11 @@ uint16_t FileFlowFlagsToFlags(const uint16_t flow_file_flags, uint8_t direction) uint16_t flags = 0; if (direction == STREAM_TOSERVER) { - if ((flow_file_flags & (FLOWFILE_NO_STORE_TS | FLOWFILE_STORE)) == FLOWFILE_NO_STORE_TS) { + if ((flow_file_flags & (FLOWFILE_NO_STORE_TS | 
FLOWFILE_STORE_TS)) == + FLOWFILE_NO_STORE_TS) { flags |= FILE_NOSTORE; + } else if (flow_file_flags & FLOWFILE_STORE_TS) { + flags |= FILE_STORE; } if (flow_file_flags & FLOWFILE_NO_MAGIC_TS) { @@ -255,8 +258,11 @@ uint16_t FileFlowFlagsToFlags(const uint16_t flow_file_flags, uint8_t direction) flags |= FILE_NOSHA256; } } else { - if ((flow_file_flags & (FLOWFILE_NO_STORE_TC | FLOWFILE_STORE)) == FLOWFILE_NO_STORE_TC) { + if ((flow_file_flags & (FLOWFILE_NO_STORE_TC | FLOWFILE_STORE_TC)) == + FLOWFILE_NO_STORE_TC) { flags |= FILE_NOSTORE; + } else if (flow_file_flags & FLOWFILE_STORE_TC) { + flags |= FILE_STORE; } if (flow_file_flags & FLOWFILE_NO_MAGIC_TC) { @@ -275,9 +281,6 @@ uint16_t FileFlowFlagsToFlags(const uint16_t flow_file_flags, uint8_t direction) flags |= FILE_NOSHA256; } } - if (flow_file_flags & FLOWFILE_STORE) { - flags |= FILE_STORE; - } DEBUG_VALIDATE_BUG_ON((flags & (FILE_STORE | FILE_NOSTORE)) == (FILE_STORE | FILE_NOSTORE)); SCLogDebug("direction %02x flags %02x", direction, flags); diff --git a/src/util-host-info.c b/src/util-host-info.c index c0dd93b80cfc..282b2fb24086 100644 --- a/src/util-host-info.c +++ b/src/util-host-info.c @@ -44,7 +44,6 @@ int SCKernelVersionIsAtLeast(int major, int minor) PCRE2_SIZE eo; int ret; int kmajor, kminor; - PCRE2_UCHAR **list; /* get local version */ if (uname(&kuname) != 0) { @@ -79,25 +78,36 @@ int SCKernelVersionIsAtLeast(int major, int minor) goto error; } - pcre2_substring_list_get(version_regex_match, &list, NULL); + char majorstr[32]; + size_t pcre2len = sizeof(majorstr); + ret = pcre2_substring_copy_bynumber( + version_regex_match, 1, (PCRE2_UCHAR8 *)majorstr, &pcre2len); + if (ret < 0) { + SCLogError("pcre2_substring_copy_bynumber failed"); + goto error; + } - bool err = false; - if (StringParseInt32(&kmajor, 10, 0, (const char *)list[1]) < 0) { - SCLogError("Invalid value for kmajor: '%s'", list[1]); - err = true; + char minorstr[32]; + pcre2len = sizeof(majorstr); + ret = 
pcre2_substring_copy_bynumber( + version_regex_match, 2, (PCRE2_UCHAR8 *)minorstr, &pcre2len); + if (ret < 0) { + SCLogError("pcre2_substring_copy_bynumber failed"); + goto error; } - if (StringParseInt32(&kminor, 10, 0, (const char *)list[2]) < 0) { - SCLogError("Invalid value for kminor: '%s'", list[2]); - err = true; + + if (StringParseInt32(&kmajor, 10, 0, (const char *)majorstr) < 0) { + SCLogError("Invalid value for kmajor: '%s'", minorstr); + goto error; + } + if (StringParseInt32(&kminor, 10, 0, (const char *)minorstr) < 0) { + SCLogError("Invalid value for kminor: '%s'", minorstr); + goto error; } - pcre2_substring_list_free((PCRE2_SPTR *)list); pcre2_match_data_free(version_regex_match); pcre2_code_free(version_regex); - if (err) - goto error; - if (kmajor > major) return 1; if (kmajor == major && kminor >= minor) diff --git a/src/util-hugepages.c b/src/util-hugepages.c new file mode 100644 index 000000000000..5ad351944037 --- /dev/null +++ b/src/util-hugepages.c @@ -0,0 +1,414 @@ +/* Copyright (C) 2023 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +/** + * \file + * + * \author Lukas Sismis + */ + +#include "suricata.h" +#include "util-debug.h" +#include "util-hugepages.h" +#include "util-path.h" + +static uint16_t SystemHugepageSizesCntPerNodeGet(uint16_t node_index); +static uint16_t SystemNodeCountGet(void); +static void SystemHugepagePerNodeGetHugepageSizes( + uint16_t node_index, uint16_t hp_sizes_cnt, uint32_t *hp_sizes); +static HugepageInfo *SystemHugepageHugepageInfoCreate(uint16_t hp_size_cnt); +static int16_t SystemHugepagePerNodeGetHugepageInfo(uint16_t node_index, NodeInfo *node); +static void SystemHugepageHugepageInfoDestroy(HugepageInfo *h); +static void SystemHugepageNodeInfoDestroy(NodeInfo *n); +static void SystemHugepageNodeInfoDump(NodeInfo *n); +static void SystemHugepageSnapshotDump(SystemHugepageSnapshot *s); + +typedef enum OSHugepageAction_ { + OS_UNKNOWN, // unknown/unsupported OS + OS_LINUX_SYS_DEVICES, +} OSHugepageAction; + +static OSHugepageAction SystemHugepageDetermineOS(void) +{ + // try Linux + if (SCPathExists("/sys/devices/system/node/")) { + return OS_LINUX_SYS_DEVICES; + } + + return OS_UNKNOWN; +} + +static bool SystemHugepageSupported(void) +{ + if (SystemHugepageDetermineOS() != OS_UNKNOWN) + return true; + return false; +} + +/** + * \brief Linux-specific function to detect number of NUMA nodes on the system + * \returns number of NUMA nodes, 0 on error + */ +static uint16_t SystemNodeCountGetLinux(void) +{ + char dir_path[] = "/sys/devices/system/node/"; + DIR *dir = opendir(dir_path); + if (dir == NULL) + FatalError("unable to open %s", dir_path); + + uint16_t count = 0; + struct dirent *entry; + while ((entry = readdir(dir)) != NULL) { + char d_name[] = "node"; + if (SCIsRegularDirectory(entry) && strncmp(entry->d_name, d_name, strlen(d_name)) == 0) + count++; + } + closedir(dir); + return count; +} + +/** + * \brief Linux-specific function to detect number of unique hugepage sizes + * \param[in] node_index index of the NUMA node + * \returns number of 
hugepage sizes, 0 on error + */ +static uint16_t SystemHugepageSizesCntPerNodeGetLinux(uint16_t node_index) +{ + char dir_path[256]; + snprintf(dir_path, sizeof(dir_path), "/sys/devices/system/node/node%d/hugepages/", node_index); + DIR *dir = opendir(dir_path); + if (dir == NULL) { + SCLogInfo("unable to open %s", dir_path); + return 0; + } + + uint16_t count = 0; + struct dirent *entry; + while ((entry = readdir(dir)) != NULL) { + char d_name[] = "hugepages-"; + if (SCIsRegularDirectory(entry) && strncmp(entry->d_name, d_name, strlen(d_name)) == 0) + count++; + } + closedir(dir); + return count; +} + +/** + * \brief Linux-specific function to detect unique hugepage sizes + * \note Arrays `hugepages` and `hp_sizes` are expected to have the same size + * \param[in] node_index index of the NUMA node + * \param[in] hp_sizes_cnt number of the unique hugepage sizes + * \param[out] hp_sizes a pointer to the array of hugepage sizes + */ +static void SystemHugepagePerNodeGetHugepageSizesLinux( + uint16_t node_index, uint16_t hp_sizes_cnt, uint32_t *hp_sizes) +{ + char dir_path[256]; + snprintf(dir_path, sizeof(dir_path), "/sys/devices/system/node/node%d/hugepages/", node_index); + DIR *dir = opendir(dir_path); + if (dir == NULL) + FatalError("unable to open %s", dir_path); + + uint16_t index = 0; + struct dirent *entry; + while ((entry = readdir(dir)) != NULL) { + if (SCIsRegularDirectory(entry) && strncmp(entry->d_name, "hugepages-", 10) == 0) { + sscanf(entry->d_name, "hugepages-%ukB", &(hp_sizes[index])); + index++; + } + } + closedir(dir); +} + +/** + * \brief Linux-specific function to detect number of unique hugepage sizes + * \note Arrays `hugepages` and `hp_sizes` are expected to have the same size + * \param[out] hugepages a pointer to the array of hugepage info structures + * \param[in] hp_sizes a pointer to the array of hugepage sizes + * \param[in] hp_sizes_cnt number of hugepage sizes + * \param[in] node_index index of the NUMA node + * \returns 0 on success, 
negative number on error + */ +static int16_t SystemHugepagePerNodeGetHugepageInfoLinux( + HugepageInfo *hugepages, uint32_t *hp_sizes, uint16_t hp_sizes_cnt, uint16_t node_index) +{ + for (int16_t i = 0; i < hp_sizes_cnt; i++) { + hugepages[i].size_kb = hp_sizes[i]; + char path[256]; + snprintf(path, sizeof(path), + "/sys/devices/system/node/node%hu/hugepages/hugepages-%ukB/nr_hugepages", + node_index, hp_sizes[i]); + FILE *f = fopen(path, "r"); + if (!f) { + SCLogInfo("unable to open %s", path); + return -SC_ENOENT; + } + if (fscanf(f, "%hu", &hugepages[i].allocated) != 1) { + SCLogInfo("failed to read the total number of allocated hugepages (%ukB) on node %hu", + hp_sizes[i], node_index); + fclose(f); + return -SC_EINVAL; + } + fclose(f); + + snprintf(path, sizeof(path), + "/sys/devices/system/node/node%hu/hugepages/hugepages-%ukB/free_hugepages", + node_index, hp_sizes[i]); + f = fopen(path, "r"); + if (!f) { + SCLogInfo("unable to open %s", path); + return -SC_ENOENT; + } + if (fscanf(f, "%hu", &hugepages[i].free) != 1) { + SCLogInfo("failed to read the total number of free hugepages (%ukB) on node %hu", + hp_sizes[i], node_index); + fclose(f); + return -SC_EINVAL; + } + fclose(f); + } + + return 0; +} + +/** + * \brief The function gathers information about hugepages on a given node + * \param[in] node_index index of the NUMA node + * \param[out] node a pointer to the structure to hold hugepage info + * \returns 0 on success, negative number on error + */ +static int16_t SystemHugepagePerNodeGetHugepageInfo(uint16_t node_index, NodeInfo *node) +{ + uint16_t hp_sizes_cnt = SystemHugepageSizesCntPerNodeGet(node_index); + if (hp_sizes_cnt == 0) { + SCLogInfo("hugepages not found for node %d", node_index); + return -SC_ENOENT; + } + uint32_t *hp_sizes = SCCalloc(hp_sizes_cnt, sizeof(*hp_sizes)); + if (hp_sizes == NULL) { + FatalError("failed to allocate memory for hugepage info"); + } + SystemHugepagePerNodeGetHugepageSizes(node_index, hp_sizes_cnt, hp_sizes); + 
+ node->hugepages = SystemHugepageHugepageInfoCreate(hp_sizes_cnt); + node->num_hugepage_sizes = hp_sizes_cnt; + + int16_t ret = 0; + if (SystemHugepageDetermineOS() == OS_LINUX_SYS_DEVICES) + ret = SystemHugepagePerNodeGetHugepageInfoLinux( + node->hugepages, hp_sizes, node->num_hugepage_sizes, node_index); + + SCFree(hp_sizes); + return ret; +} + +/** + * \brief The function detects number of NUMA nodes on the system + * \returns 0 if detection is unsuccessful, otherwise number of detected nodes + */ +static uint16_t SystemNodeCountGet(void) +{ + if (SystemHugepageDetermineOS() == OS_LINUX_SYS_DEVICES) + return SystemNodeCountGetLinux(); + return 0; +} + +/** + * \brief The function detects the number of unique hugepage sizes + * \returns 0 if detection is unsuccessful, otherwise number of hugepage sizes + */ +static uint16_t SystemHugepageSizesCntPerNodeGet(uint16_t node_index) +{ + if (SystemHugepageDetermineOS() == OS_LINUX_SYS_DEVICES) + return SystemHugepageSizesCntPerNodeGetLinux(node_index); + return 0; +} + +/** + * \brief The function fills an array with unique hugepage sizes + * \note Arrays `hugepages` and `hp_sizes` are expected to have the same size + * \param[in] node_index index of the NUMA node + * \param[in] hp_sizes_cnt number of hugepage sizes + * \param[out] hp_sizes a pointer to the array of hugepage sizes + */ +static void SystemHugepagePerNodeGetHugepageSizes( + uint16_t node_index, uint16_t hp_sizes_cnt, uint32_t *hp_sizes) +{ + if (SystemHugepageDetermineOS() == OS_LINUX_SYS_DEVICES) + SystemHugepagePerNodeGetHugepageSizesLinux(node_index, hp_sizes_cnt, hp_sizes); +} + +static HugepageInfo *SystemHugepageHugepageInfoCreate(uint16_t hp_size_cnt) +{ + HugepageInfo *h = SCCalloc(hp_size_cnt, sizeof(*h)); + if (h == NULL) { + FatalError("failed to allocate hugepage info array"); + } + return h; +} + +static void SystemHugepageHugepageInfoDestroy(HugepageInfo *h) +{ + if (h != NULL) + SCFree(h); +} + +static void 
SystemHugepageNodeInfoDestroy(NodeInfo *n) +{ + if (n == NULL) + return; + + SystemHugepageHugepageInfoDestroy(n->hugepages); +} + +static void SystemHugepageNodeInfoDump(NodeInfo *n) +{ + if (n == NULL) + return; + + for (uint16_t i = 0; i < n->num_hugepage_sizes; i++) { + SCLogDebug("Hugepage size - %dkB - allocated: %d free: %d", n->hugepages[i].size_kb, + n->hugepages[i].allocated, n->hugepages[i].free); + } +} + +/** + * \brief The function prints out the hugepage snapshot + * \param[in] s a pointer to the snapshot + */ +static void SystemHugepageSnapshotDump(SystemHugepageSnapshot *s) +{ + if (s == NULL) + return; + + for (uint16_t i = 0; i < s->num_nodes; i++) { + SCLogDebug("NUMA Node %d", i); + SystemHugepageNodeInfoDump(&(s->nodes[i])); + } +} + +void SystemHugepageSnapshotDestroy(SystemHugepageSnapshot *s) +{ + if (s == NULL) + return; + + for (uint16_t i = 0; i < s->num_nodes; i++) { + SystemHugepageNodeInfoDestroy(&(s->nodes[i])); + } + SCFree(s->nodes); + SCFree(s); +} + +/** + * \brief The function creates a snapshot of the system's hugepage usage + * per NUMA node and per hugepage size. + * The snapshot is used to evaluate the system's hugepage usage after + * initialization of Suricata. 
+ * \returns a pointer to the snapshot, NULL on error + */ +SystemHugepageSnapshot *SystemHugepageSnapshotCreate(void) +{ + if (!SystemHugepageSupported()) + return NULL; + + uint16_t node_cnt = SystemNodeCountGet(); + if (node_cnt == 0) { + SCLogInfo("hugepage snapshot failed - cannot obtain number of NUMA nodes in the system"); + return NULL; + } + NodeInfo *nodes = SCCalloc(node_cnt, sizeof(*nodes)); + if (nodes == NULL) { + FatalError("failed to allocate memory for NUMA node info"); + } + + SystemHugepageSnapshot *s = SCCalloc(1, sizeof(*s)); + if (s == NULL) { + SCFree(nodes); + FatalError("failed to allocate memory for NUMA node snapshot"); + } + s->num_nodes = node_cnt; + s->nodes = nodes; + + for (uint16_t i = 0; i < s->num_nodes; i++) { + int16_t ret = SystemHugepagePerNodeGetHugepageInfo(i, &s->nodes[i]); + if (ret != 0) { + SystemHugepageSnapshotDestroy(s); + return NULL; + } + } + + return s; +} + +/** + * \brief The function compares two hugepage snapshots and prints out + * recommendations for hugepage configuration + * \param[in] pre_s a pointer to the snapshot taken before Suricata initialization + * \param[in] post_s a pointer to the snapshot taken after Suricata initialization + */ +void SystemHugepageEvaluateHugepages(SystemHugepageSnapshot *pre_s, SystemHugepageSnapshot *post_s) +{ + if (!SystemHugepageSupported() || pre_s == NULL || post_s == NULL) + return; + + SCLogDebug("Hugepages before initialization"); + SystemHugepageSnapshotDump(pre_s); + + SCLogDebug("Hugepages after initialization"); + SystemHugepageSnapshotDump(post_s); + + if (pre_s->num_nodes != post_s->num_nodes) + FatalError("Number of NUMA nodes changed during hugepage evaluation"); + + for (int32_t i = 0; i < post_s->num_nodes; i++) { + if (pre_s->nodes[i].num_hugepage_sizes != post_s->nodes[i].num_hugepage_sizes) + FatalError("Number of NUMA node hugepage sizes changed during hugepage evaluation"); + + for (int32_t j = 0; j < post_s->nodes->num_hugepage_sizes; j++) { + 
HugepageInfo *prerun_hp = &pre_s->nodes[i].hugepages[j]; + HugepageInfo *postrun_hp = &post_s->nodes[i].hugepages[j]; + + if (prerun_hp->free == 0) { + continue; // this HP size on this node has no HPs allocated + } else if (prerun_hp->free < postrun_hp->free) { + SCLogWarning( + "Hugepage usage decreased while it should only increase/stay the same"); + } else if (prerun_hp->free > 0 && prerun_hp->free == postrun_hp->free) { + SCLogPerf("%ukB hugepages on NUMA node %u are unused and can be deallocated", + postrun_hp->size_kb, i); + } else { // assumes this is an active NUMA node because at least some hugepages were + // used + // speculative hint only for 2048kB pages as e.g. 1 GB pages can leave a lot of room + // for additional allocations + if (postrun_hp->size_kb == 2048 && postrun_hp->free == 0) { + SCLogPerf("all %ukB hugepages used on NUMA node %d - consider increasing to " + "prevent memory allocation from other NUMA nodes", + postrun_hp->size_kb, i); + } + + float free_hugepages_ratio = (float)postrun_hp->free / (float)prerun_hp->free; + if (free_hugepages_ratio > 0.5) { + int32_t used_hps = prerun_hp->free - postrun_hp->free; + SCLogPerf("Hugepages on NUMA node %u can be set to %.0lf (only using %u/%u " + "%ukB hugepages)", + i, ceil((prerun_hp->free - postrun_hp->free) * 1.15), used_hps, + prerun_hp->free, postrun_hp->size_kb); + } + } + } + } +} diff --git a/src/util-hugepages.h b/src/util-hugepages.h new file mode 100644 index 000000000000..8946eae6adec --- /dev/null +++ b/src/util-hugepages.h @@ -0,0 +1,53 @@ +/* Copyright (C) 2023 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +/** + * \file + * + * \author Lukas Sismis + */ + +#ifndef UTIL_HUGEPAGES_H +#define UTIL_HUGEPAGES_H + +typedef struct { + uint32_t size_kb; + uint16_t allocated; + uint16_t free; +} HugepageInfo; + +// Structure to hold information about individual NUMA nodes in the system and +// and their respective allocated hugepages +// So for e.g. NUMA node 0 there can be 2 hugepage_size - 2 MB and 1 GB +// Each hugepage size will then have a record of number of allocated/free hpages +typedef struct { + uint16_t num_hugepage_sizes; + HugepageInfo *hugepages; +} NodeInfo; + +// Structure to hold information about all hugepage sizes residing on all NUMA +// nodes in the system +typedef struct { + uint16_t num_nodes; + NodeInfo *nodes; +} SystemHugepageSnapshot; + +SystemHugepageSnapshot *SystemHugepageSnapshotCreate(void); +void SystemHugepageSnapshotDestroy(SystemHugepageSnapshot *s); +void SystemHugepageEvaluateHugepages(SystemHugepageSnapshot *pre_s, SystemHugepageSnapshot *post_s); + +#endif /* UTIL_HUGEPAGES_H */ diff --git a/src/util-ja3.c b/src/util-ja3.c index b361b3e74e39..b89a62e0d0bf 100644 --- a/src/util-ja3.c +++ b/src/util-ja3.c @@ -64,6 +64,8 @@ void Ja3BufferFree(JA3Buffer **buffer) *buffer = NULL; } +#ifdef HAVE_JA3 + /** * \internal * \brief Resize buffer if it is full. 
@@ -300,3 +302,29 @@ InspectionBuffer *Ja3DetectGetString(DetectEngineThreadCtx *det_ctx, } return buffer; } + +#else /* HAVE_JA3 */ + +/* Stubs for when JA3 is disabled */ + +int Ja3BufferAppendBuffer(JA3Buffer **buffer1, JA3Buffer **buffer2) +{ + return 0; +} + +int Ja3BufferAddValue(JA3Buffer **buffer, uint32_t value) +{ + return 0; +} + +char *Ja3GenerateHash(JA3Buffer *buffer) +{ + return NULL; +} + +int Ja3IsDisabled(const char *type) +{ + return true; +} + +#endif /* HAVE_JA3 */ diff --git a/src/util-ja3.h b/src/util-ja3.h index 5a0f8c508e6d..c5de15fa7757 100644 --- a/src/util-ja3.h +++ b/src/util-ja3.h @@ -41,6 +41,7 @@ int Ja3BufferAddValue(JA3Buffer **, uint32_t); char *Ja3GenerateHash(JA3Buffer *); int Ja3IsDisabled(const char *); +#ifdef HAVE_JA3 InspectionBuffer *Ja3DetectGetHash(DetectEngineThreadCtx *det_ctx, const DetectEngineTransforms *transforms, Flow *_f, const uint8_t _flow_flags, void *txv, const int list_id); @@ -48,6 +49,5 @@ InspectionBuffer *Ja3DetectGetHash(DetectEngineThreadCtx *det_ctx, InspectionBuffer *Ja3DetectGetString(DetectEngineThreadCtx *det_ctx, const DetectEngineTransforms *transforms, Flow *_f, const uint8_t _flow_flags, void *txv, const int list_id); - -#endif /* __UTIL_JA3_H__ */ - +#endif /* HAVE_JA3 */ +#endif /* SURICATA_UTIL_JA3_H */ diff --git a/src/util-ja4.h b/src/util-ja4.h new file mode 100644 index 000000000000..769e089652d8 --- /dev/null +++ b/src/util-ja4.h @@ -0,0 +1,29 @@ +/* Copyright (C) 2024 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +/** + * \file + * + * \author Sascha Steinbiss + */ + +#ifndef SURICATA_UTIL_JA4_H +#define SURICATA_UTIL_JA4_H + +#define JA4_HEX_LEN 36 + +#endif /* SURICATA_UTIL_JA4_H */ diff --git a/src/util-port-interval-tree.c b/src/util-port-interval-tree.c new file mode 100644 index 000000000000..57fe1094f204 --- /dev/null +++ b/src/util-port-interval-tree.c @@ -0,0 +1,326 @@ +/* Copyright (C) 2024 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +/** + * \file + * + * \author Shivani Bhardwaj + */ + +#include "util-port-interval-tree.h" +#include "util-validate.h" +#include "detect-engine-siggroup.h" +#include "detect-engine-port.h" + +/** + * \brief Function to compare two interval nodes. This defines the order + * of insertion of a node in the interval tree. This also updates + * the max attribute of any node in a given tree if needed. 
+ * + * \param a First node to compare + * \param b Second node to compare + * + * \return 1 if low of node a is bigger, -1 otherwise + */ +static int SCPortIntervalCompareAndUpdate(const SCPortIntervalNode *a, SCPortIntervalNode *b) +{ + if (a->port2 > b->max) { + b->max = a->port2; + } + if (a->port >= b->port) { + SCReturnInt(1); + } + SCReturnInt(-1); +} + +// cppcheck-suppress nullPointerRedundantCheck +IRB_GENERATE(PI, SCPortIntervalNode, irb, SCPortIntervalCompareAndUpdate); + +/** + * \brief Function to initialize the interval tree. + * + * \return Pointer to the newly created interval tree + */ +SCPortIntervalTree *SCPortIntervalTreeInit(void) +{ + SCPortIntervalTree *it = SCCalloc(1, sizeof(SCPortIntervalTree)); + if (it == NULL) { + return NULL; + } + + return it; +} + +/** + * \brief Helper function to free a given node in the interval tree. + * + * \param de_ctx Detection Engine Context + * \param it Pointer to the interval tree + */ +static void SCPortIntervalNodeFree(DetectEngineCtx *de_ctx, SCPortIntervalTree *it) +{ + SCPortIntervalNode *node = NULL, *safe = NULL; + IRB_FOREACH_SAFE(node, PI, &it->tree, safe) + { + SigGroupHeadFree(de_ctx, node->sh); + PI_IRB_REMOVE(&it->tree, node); + SCFree(node); + } + it->head = NULL; +} + +/** + * \brief Function to free an entire interval tree. + * + * \param de_ctx Detection Engine Context + * \param it Pointer to the interval tree + */ +void SCPortIntervalTreeFree(DetectEngineCtx *de_ctx, SCPortIntervalTree *it) +{ + if (it) { + SCPortIntervalNodeFree(de_ctx, it); + SCFree(it); + } +} + +/** + * \brief Function to insert a node in the interval tree. 
+ * + * \param de_ctx Detection Engine Context + * \param it Pointer to the interval tree + * \param p Pointer to a DetectPort object + * + * \return SC_OK if the node was inserted successfully, SC_EINVAL otherwise + */ +int SCPortIntervalInsert(DetectEngineCtx *de_ctx, SCPortIntervalTree *it, const DetectPort *p) +{ + DEBUG_VALIDATE_BUG_ON(p->port > p->port2); + + SCPortIntervalNode *pi = SCCalloc(1, sizeof(*pi)); + if (pi == NULL) { + return SC_EINVAL; + } + + pi->port = p->port; + pi->port2 = p->port2; + SigGroupHeadCopySigs(de_ctx, p->sh, &pi->sh); + + if (PI_IRB_INSERT(&it->tree, pi) != NULL) { + SCLogDebug("Node wasn't added to the tree: port: %d, port2: %d", pi->port, pi->port2); + SCFree(pi); + return SC_EINVAL; + } + return SC_OK; +} + +/** + * \brief Function to remove multiple sig entries corresponding to the same + * signature group and merge them into one. + * + * \param de_ctx Detection Engine Context + * \param list Pointer to the list to be modified + */ +static void SCPortIntervalSanitizeList(DetectEngineCtx *de_ctx, DetectPort **list) +{ + DetectPort *cur = (*list)->last; + if (cur == NULL) + return; + + DetectPort *prev = (*list)->last->prev; + if (prev == NULL) + return; + + /* rulegroup IDs are assigned much later so, compare SGHs */ + if (SigGroupHeadEqual(prev->sh, cur->sh)) { + if (prev->port2 == (cur->port - 1)) { + /* Merge the port objects */ + prev->port2 = cur->port2; + (*list)->last = prev; + (*list)->last->next = NULL; + DetectPortFree(de_ctx, cur); + } + } +} + +/** + * \brief Function to check if a port range overlaps with a given set of ports + * + * \param port Given low port + * \param port2 Given high port + * \param ptr Pointer to the node in the tree to be checked against + * + * \return true if an overlaps was found, false otherwise + */ +static bool SCPortIntervalIsOverlap( + const uint16_t port, const uint16_t port2, const SCPortIntervalNode *ptr) +{ + /* Two intervals i and i' are said to overlap if + * - i (intersection) 
i' != NIL + * - i.low <= i'.high + * - i'.low <= i.high + * + * There are four possible cases of overlaps as shown below which + * are all covered by the if condition that follows. + * + * Case 1: [.........] i + * [...................] i' + * + * Case 2: [...................] i + * [.........] i' + * + * Case 3: [........] i + * [..............] i' + * + * Case 4: [..............] i + * [.............] i' + */ + if (port <= ptr->port2 && ptr->port <= port2) { + return true; + } + + SCLogDebug("No overlap found for [%d, %d] w [%d, %d]", port, port2, ptr->port, ptr->port2); + return false; +} + +#define STACK_SIZE 100 + +/** + * \brief Function to find all the overlaps of given ports with the existing + * port ranges in the interval tree. This function takes in a low and + * a high port, considers it a continuos range and tries to match it + * against all the given port ranges in the interval tree. This search + * for overlap happens in min(O(k*log(n)), O(n*n)) time where, + * n = number of nodes in the tree, and, + * k = number of intervals with which an overlap was found + * + * \param de_ctx Detection Engine Context + * \param port Given low port + * \param port2 Given high port + * \param ptr Pointer to the root of the tree + * \param list A list of DetectPort objects to be filled + */ +static void SCPortIntervalFindOverlaps(DetectEngineCtx *de_ctx, const uint16_t port, + const uint16_t port2, SCPortIntervalNode *root, DetectPort **list) +{ + DetectPort *new_port = NULL; + int stack_depth = 0; + SCPortIntervalNode **stack = + (SCPortIntervalNode **)SCCalloc(STACK_SIZE, sizeof(SCPortIntervalNode *)); + if (stack == NULL) + return; + SCPortIntervalNode *current = root; + int stack_size = STACK_SIZE; + + while (current || stack_depth) { + while (current != NULL) { + if (current->max < port) { + current = NULL; + break; + } + const bool is_overlap = SCPortIntervalIsOverlap(port, port2, current); + + if (is_overlap && (new_port == NULL)) { + /* Allocate memory for 
port obj only if it's first overlap */ + new_port = DetectPortInit(); + if (new_port == NULL) { + goto error; + } + + SCLogDebug("Found overlaps for [%u:%u], creating new port", port, port2); + new_port->port = port; + new_port->port2 = port2; + SigGroupHeadCopySigs(de_ctx, current->sh, &new_port->sh); + + /* Since it is guaranteed that the ports received by this stage + * will be sorted, insert any new ports to the end of the list + * and avoid walking the entire list */ + if (*list == NULL) { + *list = new_port; + (*list)->last = new_port; + } else if (((*list)->last->port != new_port->port) && + ((*list)->last->port2 != new_port->port2)) { + DEBUG_VALIDATE_BUG_ON(new_port->port < (*list)->last->port); + (*list)->last->next = new_port; + new_port->prev = (*list)->last; + (*list)->last = new_port; + } else { + SCLogDebug("Port already exists in the list"); + goto error; + } + } else if (is_overlap && (new_port != NULL)) { + SCLogDebug("Found overlaps for [%u:%u], adding sigs", port, port2); + /* Only copy the relevant SGHs on later overlaps */ + SigGroupHeadCopySigs(de_ctx, current->sh, &new_port->sh); + } + stack[stack_depth++] = current; + if (stack_depth == stack_size) { + SCLogDebug("Stack depth %d maxed out, realloc'ing..", stack_depth); + stack_size *= 2; + void *tmp = SCRealloc(stack, stack_size * sizeof(SCPortIntervalNode *)); + if (tmp == NULL) { + SCLogError("Couldn't realloc the interval tree stack"); + goto error; + } + stack = tmp; + } + current = IRB_LEFT(current, irb); + } + + if (stack_depth == 0) { + SCLogDebug("Stack depth was exhausted"); + break; + } + + SCPortIntervalNode *popped = stack[stack_depth - 1]; + stack_depth--; + BUG_ON(popped == NULL); + current = IRB_RIGHT(popped, irb); + } + if (new_port != NULL) + SCPortIntervalSanitizeList(de_ctx, list); + if (stack != NULL) + SCFree(stack); + return; +error: + if (new_port != NULL) + DetectPortFree(de_ctx, new_port); + if (stack != NULL) + SCFree(stack); + return; +} + +/** + * \brief Callee 
function to find all overlapping port ranges as asked + * by the detection engine during Stage 2 of signature grouping. + * + * \param de_ctx Detection Engine Context + * \param port Given low port + * \param port2 Given high port + * \param head Pointer to the head of the tree named PI + * \param list Pointer to the list of port objects that needs to be filled/updated + */ +void SCPortIntervalFindOverlappingRanges(DetectEngineCtx *de_ctx, const uint16_t port, + const uint16_t port2, const struct PI *head, DetectPort **list) +{ + if (head == NULL) { + SCLogDebug("Tree head should not be NULL. Nothing to do further."); + return; + } + SCPortIntervalNode *ptr = IRB_ROOT(head); + SCLogDebug("Finding overlaps for the range [%d, %d]", port, port2); + SCPortIntervalFindOverlaps(de_ctx, port, port2, ptr, list); +} diff --git a/src/util-port-interval-tree.h b/src/util-port-interval-tree.h new file mode 100644 index 000000000000..0eda61b72a02 --- /dev/null +++ b/src/util-port-interval-tree.h @@ -0,0 +1,29 @@ +/* Copyright (C) 2024 Open Information Security Foundation + * + * You can copy, redistribute or modify this Program under the terms of + * the GNU General Public License version 2 as published by the Free + * Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +/** + * \file + * + * \author Shivani Bhardwaj + */ + +#ifndef __UTIL_INTERVAL_TREE_H__ +#define __UTIL_INTERVAL_TREE_H__ + +#include "detect-engine-port.h" + +#endif /* __UTIL_INTERVAL_TREE_H__ */ diff --git a/src/util-profiling.c b/src/util-profiling.c index 5d4bcc8905ce..1cf82210726f 100644 --- a/src/util-profiling.c +++ b/src/util-profiling.c @@ -1208,15 +1208,16 @@ int SCProfileRuleStart(Packet *p) p->flags |= PKT_PROFILE; return 1; } -#else +#endif + if (p->flags & PKT_PROFILE) { + return 1; + } + uint64_t sample = SC_ATOMIC_ADD(samples, 1); - if (sample % rate == 0) { + if ((sample % rate) == 0) { p->flags |= PKT_PROFILE; return 1; } -#endif - if (p->flags & PKT_PROFILE) - return 1; return 0; } @@ -1429,6 +1430,7 @@ void SCProfilingInit(void) SC_ATOMIC_INIT(profiling_rules_active); SC_ATOMIC_INIT(samples); intmax_t rate_v = 0; + ConfNode *conf; (void)ConfGetInt("profiling.sample-rate", &rate_v); if (rate_v > 0 && rate_v < INT_MAX) { @@ -1445,22 +1447,29 @@ void SCProfilingInit(void) else SCLogInfo("profiling runs for every packet"); } + + conf = ConfGetNode("profiling.rules"); + if (ConfNodeChildValueIsTrue(conf, "active")) { + SC_ATOMIC_SET(profiling_rules_active, 1); + } } /* see if we want to profile rules for this packet */ int SCProfileRuleStart(Packet *p) { + /* Move first so we'll always finish even if dynamically disabled */ + if (p->flags & PKT_PROFILE) + return 1; + if (!SC_ATOMIC_GET(profiling_rules_active)) { return 0; } + uint64_t sample = SC_ATOMIC_ADD(samples, 1); if ((sample & rate) == 0) { p->flags |= PKT_PROFILE; return 1; } - - if (p->flags & PKT_PROFILE) - return 1; return 0; } diff --git a/src/util-profiling.h b/src/util-profiling.h index 6450bc8cefe3..c066d0b36f40 100644 --- a/src/util-profiling.h +++ b/src/util-profiling.h @@ -417,7 +417,7 @@ void SCProfilingRuleThreatAggregate(DetectEngineThreadCtx *det_ctx); } #define RULE_PROFILING_END(ctx, r, m, p) \ - if (profiling_rules_enabled && ((p)->flags & PKT_PROFILE)) { \ + if 
(profiling_rules_enabled && profiling_rules_entered) { \ profile_rule_end_ = UtilCpuGetTicks(); \ SCProfilingRuleUpdateCounter( \ ctx, r->profiling_id, profile_rule_end_ - profile_rule_start_, m); \ diff --git a/src/util-radix-tree.c b/src/util-radix-tree.c index 97c85602d8a0..e5fca2fcbe09 100644 --- a/src/util-radix-tree.c +++ b/src/util-radix-tree.c @@ -723,6 +723,13 @@ static SCRadixNode *SCRadixAddKeyInternal(uint8_t *key_stream, uint8_t key_bitle return NULL; } new_node = SCRadixCreateNode(); + + if (new_node == NULL) { + SCRadixReleasePrefix(prefix, tree); + sc_errno = SC_ENOMEM; + return NULL; + } + new_node->prefix = prefix; new_node->bit = prefix->bitlen; @@ -753,6 +760,13 @@ static SCRadixNode *SCRadixAddKeyInternal(uint8_t *key_stream, uint8_t key_bitle * detail below) */ } else { inter_node = SCRadixCreateNode(); + + if (inter_node == NULL) { + SCRadixReleaseNode(new_node, tree); + sc_errno = SC_ENOMEM; + return NULL; + } + inter_node->prefix = NULL; inter_node->bit = differ_bit; inter_node->parent = node->parent; diff --git a/src/util-streaming-buffer.c b/src/util-streaming-buffer.c index 7608b5082109..c678279dd414 100644 --- a/src/util-streaming-buffer.c +++ b/src/util-streaming-buffer.c @@ -23,6 +23,10 @@ #include "util-debug.h" #include "util-error.h" +#include "app-layer-htp-mem.h" +#include "conf-yaml-loader.h" +#include "app-layer-htp.h" + static void ListRegions(StreamingBuffer *sb); #define DUMP_REGIONS 0 // set to 1 to dump a visual representation of the regions list and sbb tree. 
@@ -721,6 +725,8 @@ static inline int WARN_UNUSED GrowRegionToSize(StreamingBuffer *sb, void *ptr = REALLOC(cfg, region->buf, region->buf_size, grow); if (ptr == NULL) { + if (sc_errno == SC_OK) + sc_errno = SC_ENOMEM; return sc_errno; } /* for safe printing and general caution, lets memset the @@ -842,16 +848,11 @@ static inline void StreamingBufferSlideToOffsetWithRegions( r = next; } SCLogDebug("to_shift %p", to_shift); - } else { - to_shift = &sb->region; - SCLogDebug("shift start region %p", to_shift); - } - // this region is main, or will xfer its buffer to main - if (to_shift) { - SCLogDebug("main: offset %" PRIu64 " buf %p size %u offset %u", to_shift->stream_offset, - to_shift->buf, to_shift->buf_size, to_shift->buf_offset); - if (to_shift != &sb->region) { + // this region is main, or will xfer its buffer to main + if (to_shift && to_shift != &sb->region) { + SCLogDebug("main: offset %" PRIu64 " buf %p size %u offset %u", to_shift->stream_offset, + to_shift->buf, to_shift->buf_size, to_shift->buf_offset); DEBUG_VALIDATE_BUG_ON(sb->region.buf != NULL); sb->region.buf = to_shift->buf; @@ -860,12 +861,20 @@ static inline void StreamingBufferSlideToOffsetWithRegions( sb->region.buf_size = to_shift->buf_size; sb->region.next = to_shift->next; + BUG_ON(to_shift == &sb->region); FREE(cfg, to_shift, sizeof(*to_shift)); to_shift = &sb->region; sb->regions--; DEBUG_VALIDATE_BUG_ON(sb->regions == 0); } + } else { + to_shift = &sb->region; + SCLogDebug("shift start region %p", to_shift); + } + + // this region is main, or will xfer its buffer to main + if (to_shift) { // Do the shift. If new region is exactly at the slide offset we can skip this. 
DEBUG_VALIDATE_BUG_ON(to_shift->stream_offset > slide_offset); const uint32_t s = slide_offset - to_shift->stream_offset; @@ -1061,7 +1070,15 @@ void StreamingBufferSlideToOffset( DEBUG_VALIDATE_BUG_ON(sb->region.stream_offset < offset); } -#define DATA_FITS(sb, len) ((sb)->region.buf_offset + (len) <= (sb)->region.buf_size) +static int DataFits(const StreamingBuffer *sb, const uint32_t len) +{ + uint64_t buf_offset64 = sb->region.buf_offset; + uint64_t len64 = len; + if (len64 + buf_offset64 > UINT32_MAX) { + return -1; + } + return sb->region.buf_offset + len <= sb->region.buf_size; +} int StreamingBufferAppend(StreamingBuffer *sb, const StreamingBufferConfig *cfg, StreamingBufferSegment *seg, const uint8_t *data, uint32_t data_len) @@ -1073,7 +1090,11 @@ int StreamingBufferAppend(StreamingBuffer *sb, const StreamingBufferConfig *cfg, return -1; } - if (!DATA_FITS(sb, data_len)) { + int r = DataFits(sb, data_len); + if (r < 0) { + DEBUG_VALIDATE_BUG_ON(1); + return -1; + } else if (r == 0) { if (sb->region.buf_size == 0) { if (GrowToSize(sb, cfg, data_len) != SC_OK) return -1; @@ -1082,7 +1103,9 @@ int StreamingBufferAppend(StreamingBuffer *sb, const StreamingBufferConfig *cfg, return -1; } } - DEBUG_VALIDATE_BUG_ON(!DATA_FITS(sb, data_len)); + DEBUG_VALIDATE_BUG_ON(DataFits(sb, data_len) != 1); + if (DataFits(sb, data_len) != 1) + return -1; memcpy(sb->region.buf + sb->region.buf_offset, data, data_len); seg->stream_offset = sb->region.stream_offset + sb->region.buf_offset; @@ -1108,7 +1131,11 @@ int StreamingBufferAppendNoTrack(StreamingBuffer *sb, const StreamingBufferConfi return -1; } - if (!DATA_FITS(sb, data_len)) { + int r = DataFits(sb, data_len); + if (r < 0) { + DEBUG_VALIDATE_BUG_ON(1); + return -1; + } else if (r == 0) { if (sb->region.buf_size == 0) { if (GrowToSize(sb, cfg, data_len) != SC_OK) return -1; @@ -1117,7 +1144,7 @@ int StreamingBufferAppendNoTrack(StreamingBuffer *sb, const StreamingBufferConfi return -1; } } - 
DEBUG_VALIDATE_BUG_ON(!DATA_FITS(sb, data_len)); + DEBUG_VALIDATE_BUG_ON(DataFits(sb, data_len) != 1); memcpy(sb->region.buf + sb->region.buf_offset, data, data_len); uint32_t rel_offset = sb->region.buf_offset; @@ -1130,7 +1157,15 @@ int StreamingBufferAppendNoTrack(StreamingBuffer *sb, const StreamingBufferConfi } } -#define DATA_FITS_AT_OFFSET(region, len, offset) ((offset) + (len) <= (region)->buf_size) +static int DataFitsAtOffset( + const StreamingBufferRegion *region, const uint32_t len, const uint32_t offset) +{ + const uint64_t offset64 = offset; + const uint64_t len64 = len; + if (offset64 + len64 > UINT32_MAX) + return -1; + return (offset + len <= region->buf_size); +} #if defined(DEBUG) || defined(DEBUG_VALIDATION) static void Validate(const StreamingBuffer *sb) @@ -1474,8 +1509,6 @@ static StreamingBufferRegion *BufferInsertAtRegion(StreamingBuffer *sb, int StreamingBufferInsertAt(StreamingBuffer *sb, const StreamingBufferConfig *cfg, StreamingBufferSegment *seg, const uint8_t *data, uint32_t data_len, uint64_t offset) { - int r; - DEBUG_VALIDATE_BUG_ON(seg == NULL); DEBUG_VALIDATE_BUG_ON(offset < sb->region.stream_offset); if (offset < sb->region.stream_offset) { @@ -1493,11 +1526,15 @@ int StreamingBufferInsertAt(StreamingBuffer *sb, const StreamingBufferConfig *cf region == &sb->region ? 
"main" : "aux", region); uint32_t rel_offset = offset - region->stream_offset; - if (!DATA_FITS_AT_OFFSET(region, data_len, rel_offset)) { + int r = DataFitsAtOffset(region, data_len, rel_offset); + if (r < 0) { + DEBUG_VALIDATE_BUG_ON(1); + return SC_ELIMIT; + } else if (r == 0) { if ((r = GrowToSize(sb, cfg, (rel_offset + data_len))) != SC_OK) return r; } - DEBUG_VALIDATE_BUG_ON(!DATA_FITS_AT_OFFSET(region, data_len, rel_offset)); + DEBUG_VALIDATE_BUG_ON(DataFitsAtOffset(region, data_len, rel_offset) != 1); SCLogDebug("offset %" PRIu64 " data_len %u, rel_offset %u into region offset %" PRIu64 ", buf_offset %u, buf_size %u", @@ -2317,6 +2354,61 @@ static int StreamingBufferTest10(void) PASS; } +static int StreamingBufferTest11(void) +{ + StreamingBufferConfig cfg = { 24, 1, STREAMING_BUFFER_REGION_GAP_DEFAULT, NULL, NULL, NULL }; + StreamingBuffer *sb = StreamingBufferInit(&cfg); + FAIL_IF(sb == NULL); + + StreamingBufferSegment seg1; + FAIL_IF(StreamingBufferAppend(sb, &cfg, &seg1, (const uint8_t *)"ABCDEFGH", 8) != 0); + StreamingBufferSegment seg2; + unsigned int data_len = 0xffffffff; + FAIL_IF(StreamingBufferAppend(sb, &cfg, &seg2, (const uint8_t *)"unused", data_len) != -1); + FAIL_IF(StreamingBufferInsertAt( + sb, &cfg, &seg2, (const uint8_t *)"abcdefghij", data_len, 100000) != SC_ELIMIT); + StreamingBufferFree(sb, &cfg); + PASS; +} + +static const char *dummy_conf_string = "%YAML 1.1\n" + "---\n" + "\n" + "app-layer:\n" + " protocols:\n" + " http:\n" + " enabled: yes\n" + " memcap: 88\n" + "\n"; + +static int StreamingBufferTest12(void) +{ + ConfCreateContextBackup(); + ConfInit(); + HtpConfigCreateBackup(); + ConfYamlLoadString((const char *)dummy_conf_string, strlen(dummy_conf_string)); + HTPConfigure(); + + StreamingBufferConfig cfg = { 8, 1, STREAMING_BUFFER_REGION_GAP_DEFAULT, HTPCalloc, HTPRealloc, + HTPFree }; + StreamingBuffer *sb = StreamingBufferInit(&cfg); + FAIL_IF(sb == NULL); + + StreamingBufferSegment seg1; + 
FAIL_IF(StreamingBufferAppend(sb, &cfg, &seg1, (const uint8_t *)"ABCDEFGHIJKLMNOP", 16) != 0); + + StreamingBufferSegment seg2; + FAIL_IF(StreamingBufferAppend(sb, &cfg, &seg2, + (const uint8_t *)"ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ", + 52) != -1); + FAIL_IF(sc_errno != SC_ELIMIT); + + StreamingBufferFree(sb, &cfg); + HtpConfigRestoreBackup(); + ConfRestoreContextBackup(); + + PASS; +} #endif void StreamingBufferRegisterTests(void) @@ -2330,5 +2422,7 @@ void StreamingBufferRegisterTests(void) UtRegisterTest("StreamingBufferTest08", StreamingBufferTest08); UtRegisterTest("StreamingBufferTest09", StreamingBufferTest09); UtRegisterTest("StreamingBufferTest10", StreamingBufferTest10); + UtRegisterTest("StreamingBufferTest11 Bug 6903", StreamingBufferTest11); + UtRegisterTest("StreamingBufferTest12 Bug 6782", StreamingBufferTest12); #endif } diff --git a/src/util-thash.c b/src/util-thash.c index 6443990bc219..d9500e726222 100644 --- a/src/util-thash.c +++ b/src/util-thash.c @@ -368,8 +368,8 @@ void THashShutdown(THashTableContext *ctx) } SCFreeAligned(ctx->array); ctx->array = NULL; + (void)SC_ATOMIC_SUB(ctx->memuse, ctx->config.hash_size * sizeof(THashHashRow)); } - (void) SC_ATOMIC_SUB(ctx->memuse, ctx->config.hash_size * sizeof(THashHashRow)); THashDataQueueDestroy(&ctx->spare_q); SCFree(ctx); return; diff --git a/src/util-threshold-config.c b/src/util-threshold-config.c index 5d762a8f7091..614031be13cd 100644 --- a/src/util-threshold-config.c +++ b/src/util-threshold-config.c @@ -30,6 +30,7 @@ #include "suricata-common.h" +#include "action-globals.h" #include "host.h" #include "ippair.h" @@ -258,7 +259,7 @@ static int SetupSuppressRule(DetectEngineCtx *de_ctx, uint32_t id, uint32_t gid, for (s = de_ctx->sig_list; s != NULL; s = s->next) { /* tag the rule as noalert */ if (parsed_track == TRACK_RULE) { - s->flags |= SIG_FLAG_NOALERT; + s->action &= ~ACTION_ALERT; continue; } @@ -287,7 +288,7 @@ static int SetupSuppressRule(DetectEngineCtx *de_ctx, 
uint32_t id, uint32_t gid, /* tag the rule as noalert */ if (parsed_track == TRACK_RULE) { - s->flags |= SIG_FLAG_NOALERT; + s->action &= ~ACTION_ALERT; continue; } @@ -319,7 +320,7 @@ static int SetupSuppressRule(DetectEngineCtx *de_ctx, uint32_t id, uint32_t gid, id, gid); } else { if (parsed_track == TRACK_RULE) { - s->flags |= SIG_FLAG_NOALERT; + s->action &= ~ACTION_ALERT; goto end; } diff --git a/src/util-time.h b/src/util-time.h index 9bbd8798dd17..b0f7207b3c86 100644 --- a/src/util-time.h +++ b/src/util-time.h @@ -56,8 +56,16 @@ typedef struct { #define SCTIME_USECS(t) ((uint64_t)(t).usecs) #define SCTIME_SECS(t) ((uint64_t)(t).secs) #define SCTIME_MSECS(t) (SCTIME_SECS(t) * 1000 + SCTIME_USECS(t) / 1000) -#define SCTIME_ADD_SECS(ts, s) SCTIME_FROM_SECS((ts).secs + (s)) -#define SCTIME_ADD_USECS(ts, us) SCTIME_FROM_USECS((ts).usecs + (us)) +#define SCTIME_ADD_USECS(ts, us) \ + (SCTime_t) \ + { \ + .secs = (ts).secs + ((ts).usecs + (us)) / 1000000, .usecs = ((ts).usecs + (us)) % 1000000 \ + } +#define SCTIME_ADD_SECS(ts, s) \ + (SCTime_t) \ + { \ + .secs = (ts).secs + (s), .usecs = (ts).usecs \ + } #define SCTIME_FROM_SECS(s) \ (SCTime_t) \ { \ @@ -83,7 +91,7 @@ typedef struct { #define SCTIME_FROM_TIMESPEC(ts) \ (SCTime_t) \ { \ - .secs = (ts)->tv_sec, .usecs = (ts)->tv_nsec * 1000 \ + .secs = (ts)->tv_sec, .usecs = (ts)->tv_nsec / 1000 \ } #define SCTIME_TO_TIMEVAL(tv, t) \ diff --git a/src/util-unittest-helper.c b/src/util-unittest-helper.c index 80356cf82e2b..414f5054b9df 100644 --- a/src/util-unittest-helper.c +++ b/src/util-unittest-helper.c @@ -316,6 +316,7 @@ Packet *UTHBuildPacketReal(uint8_t *payload, uint16_t payload_len, } SET_PKT_LEN(p, hdr_offset + payload_len); p->payload = GET_PKT_DATA(p)+hdr_offset; + p->app_update_direction = UPDATE_DIR_BOTH; return p; diff --git a/suricata.yaml.in b/suricata.yaml.in index 630399126dbe..458360b1e64f 100644 --- a/suricata.yaml.in +++ b/suricata.yaml.in @@ -251,8 +251,11 @@ outputs: # output TLS transaction 
where the session is resumed using a # session id #session-resumption: no + # ja4 hashes in tls records will never be logged unless + # the following is set to on. (Default off) + # ja4: off # custom controls which TLS fields that are included in eve-log - #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3, ja3s] + #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3, ja3s, ja4] - files: force-magic: no # force logging magic on all logged files # force logging of checksums, available hash functions are md5, @@ -291,7 +294,10 @@ outputs: - snmp - rfb - sip - - quic + - quic: + # ja4 hashes in quic records will never be logged unless + # the following is set to on. (Default off) + # ja4: off - dhcp: enabled: yes # When extended mode is on, all DHCP messages are logged @@ -753,6 +759,7 @@ dpdk: # - auto takes all cores # in IPS mode it is required to specify the number of cores and the numbers on both interfaces must match threads: auto + # interrupt-mode: false # true to switch to interrupt mode promisc: true # promiscuous mode - capture all packets multicast: true # enables also detection on multicast packets checksum-checks: true # if Suricata should validate checksums @@ -892,9 +899,10 @@ app-layer: detection-ports: dp: 443 - # Generate JA3 fingerprint from client hello. If not specified it + # Generate JA3/JA4 fingerprints from client hello. If not specified it # will be disabled by default, but enabled if rules require it. 
#ja3-fingerprints: auto + #ja4-fingerprints: auto # What to do when the encrypted communications start: # - default: keep tracking TLS session, check for protocol anomalies, @@ -934,9 +942,13 @@ app-layer: #max-streams: 4096 # Maximum headers table size #max-table-size: 65536 + # Maximum reassembly size for header + continuation frames + #max-reassembly-size: 102400 smtp: enabled: yes raw-extraction: no + # Maximum number of live SMTP transactions per flow + # max-tx: 256 # Configure SMTP-MIME Decoder mime: # Decode MIME messages from SMTP transactions @@ -1084,6 +1096,8 @@ app-layer: #compression-bomb-limit: 1mb # Maximum time spent decompressing a single transaction in usec #decompression-time-limit: 100000 + # Maximum number of live transactions per flow + #max-tx: 512 server-config: @@ -1798,6 +1812,10 @@ profiling: enabled: yes filename: rule_perf.log append: yes + # Set active to yes to enable rules profiling at start + # if set to no (default), the rules profiling will have to be started + # via unix socket commands. + #active:no # Sort options: ticks, avgticks, checks, matches, maxticks # If commented out all the sort options will be used.