diff --git a/configure.ac b/configure.ac index 77a90b88dcd1..03800c371734 100644 --- a/configure.ac +++ b/configure.ac @@ -273,7 +273,7 @@ CFLAGS="${CFLAGS} -DOS_WIN32" WINDOWS_PATH="yes" AC_DEFINE([HAVE_NON_POSIX_MKDIR], [1], [mkdir is not POSIX compliant: single arg]) - RUST_LDADD=" -lws2_32 -liphlpapi -lwbemuuid -lOle32 -lOleAut32 -lUuid -luserenv -lshell32 -ladvapi32 -lgcc_eh -lbcrypt" + RUST_LDADD=" -lws2_32 -liphlpapi -lwbemuuid -lOle32 -lOleAut32 -lUuid -luserenv -lshell32 -ladvapi32 -lgcc_eh -lbcrypt -lntdll" TRY_WPCAP="yes" ;; *-*-cygwin) diff --git a/doc/userguide/configuration/suricata-yaml.rst b/doc/userguide/configuration/suricata-yaml.rst index e66b35cda87d..c63f5c878d7f 100644 --- a/doc/userguide/configuration/suricata-yaml.rst +++ b/doc/userguide/configuration/suricata-yaml.rst @@ -2474,6 +2474,8 @@ Example: [10703] 26/11/2010 -- 11:41:15 - (detect.c:560) (SigLoadSignatures) -- Engine-Analysis for fast_pattern printed to file - /var/log/suricata/rules_fast_pattern.txt + alert tcp any any -> any any (content:"Volume Serial Number"; sid:1292;) + == Sid: 1292 == Fast pattern matcher: content Fast pattern set: no diff --git a/doc/userguide/devguide/codebase/contributing/github-pr-workflow.rst b/doc/userguide/devguide/codebase/contributing/github-pr-workflow.rst new file mode 100644 index 000000000000..618c966c43c2 --- /dev/null +++ b/doc/userguide/devguide/codebase/contributing/github-pr-workflow.rst @@ -0,0 +1,46 @@ +GitHub Pull Request Workflow +============================ + +Draft Pull Requests +~~~~~~~~~~~~~~~~~~~ + +A Pull Request (PR) should be marked as `draft` if it is not intended to be merged as is, +but is waiting for some sort of feedback. +The author of the PR should be explicit with what kind of feedback is expected +(CI/QA run, discussion on the code, etc...) + +GitHub filter is ``is:pr is:open draft:true sort:updated-asc`` + +A draft may be closed if it has not been updated in two months. 
+
+Mergeable Pull Requests
+~~~~~~~~~~~~~~~~~~~~~~~
+
+When a Pull Request is intended to be merged as is, the workflow is the following:
+ 1. get reviewed, and either request changes or get approved
+ 2. if approved, get staged in a next branch (with other PRs), wait for CI validation
+ (and possibly request changes if CI finds anything)
+ 3. get merged and closed
+
+A newly created PR should match the filter
+``is:pr is:open draft:false review:none sort:updated-asc no:assignee``
+The whole team is responsible for assigning a PR to a specific person within 2 weeks.
+
+When someone gets assigned a PR, the PR should get a review status within 2 weeks:
+either changes requested, approved, or assigned to someone else if more
+expertise is needed.
+
+GitHub filter for changes-requested PRs is ``is:pr is:open draft:false sort:
+updated-asc review:changes-requested``
+
+Such a PR may be closed if it has not been updated in two months.
+It is expected that the author creates a new PR with a new version of the patch
+as described in :ref:`Pull Requests Criteria `.
+
+Command to get approved PRs is ``gh pr list --json number,reviewDecision --search
+"state:open type:pr -review:none" | jq '.[] | select(.reviewDecision=="")'``
+
+Web UI filter does not work, cf. https://github.com/orgs/community/discussions/55826
+
+Once in the approved state, PRs are the responsibility of the merger, along
+with the next branches/PRs.
diff --git a/doc/userguide/devguide/codebase/contributing/index.rst b/doc/userguide/devguide/codebase/contributing/index.rst index 4c9089e48427..e0d29125bbdd 100644 --- a/doc/userguide/devguide/codebase/contributing/index.rst +++ b/doc/userguide/devguide/codebase/contributing/index.rst @@ -6,3 +6,4 @@ Contributing contribution-process code-submission-process + github-pr-workflow diff --git a/doc/userguide/install.rst b/doc/userguide/install.rst index 6549d3aa3606..ff13c4e5634c 100644 --- a/doc/userguide/install.rst +++ b/doc/userguide/install.rst @@ -227,6 +227,18 @@ To reload rules:: .. _install-advanced: +Arch Based +^^^^^^^^^^ + +The ArchLinux AUR contains Suricata and suricata-nfqueue packages, with commonly +used configurations for compilation (may also be edited to your liking). You may +use makepkg, yay (sample below), or other AUR helpers to compile and build +Suricata packages. + +:: + + yay -S suricata + Advanced Installation --------------------- diff --git a/doc/userguide/upgrade.rst b/doc/userguide/upgrade.rst index 0dd7223dfca3..c4807b41731d 100644 --- a/doc/userguide/upgrade.rst +++ b/doc/userguide/upgrade.rst @@ -60,6 +60,9 @@ Logging changes - Protocol values and their names are built into Suricata instead of using the system's ``/etc/protocols`` file. Some names and casing may have changed in the values ``proto`` in ``eve.json`` log entries and other logs containing protocol names and values. See https://redmine.openinfosecfoundation.org/issues/4267 for more information. +- Custom logging of HTTP headers via suricata.yaml ``outputs.eve-log.types.http.custom`` + is now done in subobjects ``response_headers`` or ``request_headers`` (as for option ``dump-all-headers``) + instead of at the root of the ``http`` json object (to avoid some collisions). 
Deprecations ~~~~~~~~~~~~ diff --git a/rust/src/snmp/log.rs b/rust/src/snmp/log.rs index e37bbba30c06..83414816c466 100644 --- a/rust/src/snmp/log.rs +++ b/rust/src/snmp/log.rs @@ -18,7 +18,7 @@ // written by Pierre Chifflier use crate::jsonbuilder::{JsonBuilder, JsonError}; -use crate::snmp::snmp::{SNMPState,SNMPTransaction}; +use crate::snmp::snmp::SNMPTransaction; use crate::snmp::snmp_parser::{NetworkAddress,PduType}; use std::borrow::Cow; @@ -37,9 +37,9 @@ fn str_of_pdu_type(t:&PduType) -> Cow { } } -fn snmp_log_response(jsb: &mut JsonBuilder, state: &mut SNMPState, tx: &mut SNMPTransaction) -> Result<(), JsonError> +fn snmp_log_response(jsb: &mut JsonBuilder, tx: &mut SNMPTransaction) -> Result<(), JsonError> { - jsb.set_uint("version", state.version as u64)?; + jsb.set_uint("version", tx.version as u64)?; if tx.encrypted { jsb.set_string("pdu_type", "encrypted")?; } else { @@ -75,7 +75,7 @@ fn snmp_log_response(jsb: &mut JsonBuilder, state: &mut SNMPState, tx: &mut SNMP } #[no_mangle] -pub extern "C" fn rs_snmp_log_json_response(jsb: &mut JsonBuilder, state: &mut SNMPState, tx: &mut SNMPTransaction) -> bool +pub extern "C" fn rs_snmp_log_json_response(jsb: &mut JsonBuilder, tx: &mut SNMPTransaction) -> bool { - snmp_log_response(jsb, state, tx).is_ok() + snmp_log_response(jsb, tx).is_ok() } diff --git a/src/app-layer-ftp.c b/src/app-layer-ftp.c index 7f0accadc149..f96fcc362fd4 100644 --- a/src/app-layer-ftp.c +++ b/src/app-layer-ftp.c @@ -340,7 +340,8 @@ typedef struct FtpInput_ { int32_t orig_len; } FtpInput; -static AppLayerResult FTPGetLineForDirection(FtpState *state, FtpLineState *line, FtpInput *input) +static AppLayerResult FTPGetLineForDirection( + FtpState *state, FtpLineState *line, FtpInput *input, bool *current_line_truncated) { SCEnter(); @@ -351,8 +352,8 @@ static AppLayerResult FTPGetLineForDirection(FtpState *state, FtpLineState *line uint8_t *lf_idx = memchr(input->buf + input->consumed, 0x0a, input->len); if (lf_idx == NULL) { - if 
(!state->current_line_truncated && (uint32_t)input->len >= ftp_max_line_len) { - state->current_line_truncated = true; + if (!(*current_line_truncated) && (uint32_t)input->len >= ftp_max_line_len) { + *current_line_truncated = true; line->buf = input->buf; line->len = ftp_max_line_len; line->delim_len = 0; @@ -360,9 +361,9 @@ static AppLayerResult FTPGetLineForDirection(FtpState *state, FtpLineState *line SCReturnStruct(APP_LAYER_OK); } SCReturnStruct(APP_LAYER_INCOMPLETE(input->consumed, input->len + 1)); - } else if (state->current_line_truncated) { + } else if (*current_line_truncated) { // Whatever came in with first LF should also get discarded - state->current_line_truncated = false; + *current_line_truncated = false; line->len = 0; line->delim_len = 0; input->len = 0; @@ -372,26 +373,18 @@ static AppLayerResult FTPGetLineForDirection(FtpState *state, FtpLineState *line // e.g. input_len = 5077 // lf_idx = 5010 // max_line_len = 4096 - if (!state->current_line_truncated && (uint32_t)input->len >= ftp_max_line_len) { - state->current_line_truncated = true; - line->buf = input->buf; - line->len = ftp_max_line_len; - if (input->consumed >= 2 && input->buf[input->consumed - 2] == 0x0D) { - line->delim_len = 2; - line->len -= 2; - } else { - line->delim_len = 1; - line->len -= 1; - } - input->len = 0; - SCReturnStruct(APP_LAYER_OK); - } uint32_t o_consumed = input->consumed; input->consumed = lf_idx - input->buf + 1; line->len = input->consumed - o_consumed; input->len -= line->len; + line->lf_found = true; DEBUG_VALIDATE_BUG_ON((input->consumed + input->len) != input->orig_len); line->buf = input->buf + o_consumed; + if (line->len >= ftp_max_line_len) { + *current_line_truncated = true; + line->len = ftp_max_line_len; + SCReturnStruct(APP_LAYER_OK); + } if (input->consumed >= 2 && input->buf[input->consumed - 2] == 0x0D) { line->delim_len = 2; line->len -= 2; @@ -505,12 +498,12 @@ static AppLayerResult FTPParseRequest(Flow *f, void *ftp_state, AppLayerParserSt } 
FtpInput ftpi = { .buf = input, .len = input_len, .orig_len = input_len, .consumed = 0 }; - FtpLineState line = { .buf = NULL, .len = 0, .delim_len = 0 }; + FtpLineState line = { .buf = NULL, .len = 0, .delim_len = 0, .lf_found = false }; uint8_t direction = STREAM_TOSERVER; AppLayerResult res; while (1) { - res = FTPGetLineForDirection(state, &line, &ftpi); + res = FTPGetLineForDirection(state, &line, &ftpi, &state->current_line_truncated_ts); if (res.status == 1) { return res; } else if (res.status == -1) { @@ -531,8 +524,11 @@ static AppLayerResult FTPParseRequest(Flow *f, void *ftp_state, AppLayerParserSt tx->command_descriptor = cmd_descriptor; tx->request_length = CopyCommandLine(&tx->request, &line); - tx->request_truncated = state->current_line_truncated; + tx->request_truncated = state->current_line_truncated_ts; + if (line.lf_found) { + state->current_line_truncated_ts = false; + } if (tx->request_truncated) { AppLayerDecoderEventsSetEventRaw(&tx->tx_data.events, FtpEventRequestCommandTooLong); } @@ -695,12 +691,12 @@ static AppLayerResult FTPParseResponse(Flow *f, void *ftp_state, AppLayerParserS SCReturnStruct(APP_LAYER_OK); } FtpInput ftpi = { .buf = input, .len = input_len, .orig_len = input_len, .consumed = 0 }; - FtpLineState line = { .buf = NULL, .len = 0, .delim_len = 0 }; + FtpLineState line = { .buf = NULL, .len = 0, .delim_len = 0, .lf_found = false }; FTPTransaction *lasttx = TAILQ_FIRST(&state->tx_list); AppLayerResult res; while (1) { - res = FTPGetLineForDirection(state, &line, &ftpi); + res = FTPGetLineForDirection(state, &line, &ftpi, &state->current_line_truncated_tc); if (res.status == 1) { return res; } else if (res.status == -1) { @@ -771,11 +767,14 @@ static AppLayerResult FTPParseResponse(Flow *f, void *ftp_state, AppLayerParserS FTPString *response = FTPStringAlloc(); if (likely(response)) { response->len = CopyCommandLine(&response->str, &line); - response->truncated = state->current_line_truncated; + response->truncated = 
state->current_line_truncated_tc; if (response->truncated) { AppLayerDecoderEventsSetEventRaw( &tx->tx_data.events, FtpEventResponseCommandTooLong); } + if (line.lf_found) { + state->current_line_truncated_tc = false; + } TAILQ_INSERT_TAIL(&tx->response_list, response, next); } } diff --git a/src/app-layer-ftp.h b/src/app-layer-ftp.h index 39b53b6bf8bb..f79c5c9e7675 100644 --- a/src/app-layer-ftp.h +++ b/src/app-layer-ftp.h @@ -105,6 +105,7 @@ typedef struct FtpLineState_ { const uint8_t *buf; uint32_t len; uint8_t delim_len; + bool lf_found; } FtpLineState; typedef struct FTPString_ { @@ -148,7 +149,8 @@ typedef struct FtpState_ { TAILQ_HEAD(, FTPTransaction_) tx_list; /**< transaction list */ uint64_t tx_cnt; - bool current_line_truncated; + bool current_line_truncated_ts; + bool current_line_truncated_tc; FtpRequestCommand command; FtpRequestCommandArgOfs arg_offset; diff --git a/src/decode-ipv6.c b/src/decode-ipv6.c index 7629a7c4672c..769b967ba13b 100644 --- a/src/decode-ipv6.c +++ b/src/decode-ipv6.c @@ -574,6 +574,7 @@ int DecodeIPV6(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p, const uint8_t * CLEAR_IPV6_PACKET(p); return TM_ECODE_FAILED; } + p->proto = IPV6_GET_NH(p); #ifdef DEBUG if (SCLogDebugEnabled()) { /* only convert the addresses if debug is really enabled */ diff --git a/src/output-json-alert.c b/src/output-json-alert.c index d7918e81ffaa..472e6f98cded 100644 --- a/src/output-json-alert.c +++ b/src/output-json-alert.c @@ -253,7 +253,7 @@ static void AlertJsonSNMP(const Flow *f, const uint64_t tx_id, JsonBuilder *js) tx_id); if (tx != NULL) { jb_open_object(js, "snmp"); - rs_snmp_log_json_response(js, snmp_state, tx); + rs_snmp_log_json_response(js, tx); jb_close(js); } } diff --git a/src/output-json-snmp.c b/src/output-json-snmp.c index facf50770ac8..27545b6f6903 100644 --- a/src/output-json-snmp.c +++ b/src/output-json-snmp.c @@ -60,7 +60,7 @@ static int JsonSNMPLogger(ThreadVars *tv, void *thread_data, } jb_open_object(jb, "snmp"); - if 
(!rs_snmp_log_json_response(jb, state, snmptx)) { + if (!rs_snmp_log_json_response(jb, snmptx)) { goto error; } jb_close(jb); diff --git a/src/source-pcap-file-helper.c b/src/source-pcap-file-helper.c index de78fc45f1f2..8853080e9175 100644 --- a/src/source-pcap-file-helper.c +++ b/src/source-pcap-file-helper.c @@ -47,7 +47,7 @@ void CleanupPcapFileFileVars(PcapFileFileVars *pfv) if (pfv->shared != NULL && pfv->shared->should_delete) { SCLogDebug("Deleting pcap file %s", pfv->filename); if (unlink(pfv->filename) != 0) { - SCLogWarning("Failed to delete %s", pfv->filename); + SCLogWarning("Failed to delete %s: %s", pfv->filename, strerror(errno)); } } SCFree(pfv->filename); diff --git a/src/util-exception-policy.c b/src/util-exception-policy.c index 371644701546..4513bbb0f2ae 100644 --- a/src/util-exception-policy.c +++ b/src/util-exception-policy.c @@ -72,6 +72,9 @@ void ExceptionPolicyApply(Packet *p, enum ExceptionPolicy policy, enum PacketDro case EXCEPTION_POLICY_REJECT: SCLogDebug("EXCEPTION_POLICY_REJECT"); PacketDrop(p, ACTION_REJECT, drop_reason); + if (!EngineModeIsIPS()) { + break; + } /* fall through */ case EXCEPTION_POLICY_DROP_FLOW: SCLogDebug("EXCEPTION_POLICY_DROP_FLOW"); diff --git a/src/util-streaming-buffer.c b/src/util-streaming-buffer.c index 0b1e2981afde..3e7684c1557a 100644 --- a/src/util-streaming-buffer.c +++ b/src/util-streaming-buffer.c @@ -100,6 +100,97 @@ StreamingBufferBlock *SBB_RB_FIND_INCLUSIVE(struct SBB *head, StreamingBufferBlo return res; } +/** \interal + * \brief does data region intersect with list region 'r' + * Takes the max gap into account. + */ +static inline bool RegionsIntersect(const StreamingBuffer *sb, const StreamingBufferConfig *cfg, + const StreamingBufferRegion *r, const uint64_t offset, const uint64_t re) +{ + /* create the data range for the region, adding the max gap */ + const uint64_t reg_o = + r->stream_offset > cfg->region_gap ? 
(r->stream_offset - cfg->region_gap) : 0; + const uint64_t reg_re = r->stream_offset + r->buf_size + cfg->region_gap; + SCLogDebug("r %p: %" PRIu64 "/%" PRIu64 " - adjusted %" PRIu64 "/%" PRIu64, r, r->stream_offset, + r->stream_offset + r->buf_size, reg_o, reg_re); + /* check if data range intersects with region range */ + if (offset >= reg_o && offset <= reg_re) { + SCLogDebug("r %p is in-scope", r); + return true; + } + if (re >= reg_o && re <= reg_re) { + SCLogDebug("r %p is in-scope: %" PRIu64 " >= %" PRIu64 " && %" PRIu64 " <= %" PRIu64, r, re, + reg_o, re, reg_re); + return true; + } + SCLogDebug("r %p is out of scope: %" PRIu64 "/%" PRIu64, r, offset, re); + return false; +} + +/** \internal + * \brief find the first region for merging. + */ +static StreamingBufferRegion *FindFirstRegionForOffset(const StreamingBuffer *sb, + const StreamingBufferConfig *cfg, StreamingBufferRegion *r, const uint64_t offset, + const uint32_t len, StreamingBufferRegion **prev) +{ + const uint64_t data_re = offset + len; + SCLogDebug("looking for first region matching %" PRIu64 "/%" PRIu64, offset, data_re); + + StreamingBufferRegion *p = NULL; + for (; r != NULL; r = r->next) { + if (RegionsIntersect(sb, cfg, r, offset, data_re) == true) { + *prev = p; + return r; + } + p = r; + } + *prev = NULL; + return NULL; +} + +static StreamingBufferRegion *FindLargestRegionForOffset(const StreamingBuffer *sb, + const StreamingBufferConfig *cfg, StreamingBufferRegion *r, const uint64_t offset, + const uint32_t len) +{ + const uint64_t data_re = offset + len; + SCLogDebug("starting at %p/%" PRIu64 ", offset %" PRIu64 ", data_re %" PRIu64, r, + r->stream_offset, offset, data_re); + StreamingBufferRegion *candidate = r; + for (; r != NULL; r = r->next) { +#ifdef DEBUG + const uint64_t reg_re = r->stream_offset + r->buf_size; + SCLogDebug("checking: %p/%" PRIu64 "/%" PRIu64 ", offset %" PRIu64 "/%" PRIu64, r, + r->stream_offset, reg_re, offset, data_re); +#endif + if (!RegionsIntersect(sb, 
cfg, r, offset, data_re)) + return candidate; + + if (r->buf_size > candidate->buf_size) { + SCLogDebug("candidate %p as size %u > %u", candidate, r->buf_size, candidate->buf_size); + candidate = r; + } + } + return candidate; +} + +static StreamingBufferRegion *FindRightEdge(const StreamingBuffer *sb, + const StreamingBufferConfig *cfg, StreamingBufferRegion *r, const uint64_t offset, + const uint32_t len) +{ + const uint64_t data_re = offset + len; + StreamingBufferRegion *candidate = r; + for (; r != NULL; r = r->next) { + if (!RegionsIntersect(sb, cfg, r, offset, data_re)) { + SCLogDebug( + "r %p is out of scope: %" PRIu64 "/%u/%" PRIu64, r, offset, len, offset + len); + return candidate; + } + candidate = r; + } + return candidate; +} + static inline StreamingBufferRegion *InitBufferRegion( StreamingBuffer *sb, const StreamingBufferConfig *cfg, const uint32_t min_size) { @@ -753,7 +844,102 @@ static inline void StreamingBufferSlideToOffsetWithRegions( const uint32_t s = slide_offset - to_shift->stream_offset; if (s > 0) { const uint32_t new_data_size = to_shift->buf_size - s; - const uint32_t new_mem_size = ToNextMultipleOf(new_data_size, cfg->buf_size); + uint32_t new_mem_size = ToNextMultipleOf(new_data_size, cfg->buf_size); + + /* see if after the slide we'd overlap with the next region. If so, we need + * to consolidate them into one. Error handling is a bit peculiar. We need + * to grow a region, move data from another region into it, then free the + * other region. This could fail if we're close to the memcap. In this case + * we relax our rounding up logic so we only shrink and don't merge the 2 + * regions after all. 
*/ + if (to_shift->next && slide_offset + new_mem_size >= to_shift->next->stream_offset) { + StreamingBufferRegion *start = to_shift; + StreamingBufferRegion *next = start->next; + const uint64_t next_re = next->stream_offset + next->buf_size; + const uint32_t mem_size = ToNextMultipleOf(next_re - slide_offset, cfg->buf_size); + + /* using next as the new main */ + if (start->buf_size < next->buf_size) { + SCLogDebug("replace main with the next bigger region"); + + const uint32_t next_data_offset = next->stream_offset - slide_offset; + const uint32_t prev_buf_size = next->buf_size; + const uint32_t start_data_offset = slide_offset - start->stream_offset; + DEBUG_VALIDATE_BUG_ON(start_data_offset > start->buf_size); + if (start_data_offset > start->buf_size) { + new_mem_size = new_data_size; + goto just_main; + } + /* expand "next" to include relevant part of "start" */ + if (GrowRegionToSize(sb, cfg, next, mem_size) != 0) { + new_mem_size = new_data_size; + goto just_main; + } + SCLogDebug("region now sized %u", mem_size); + + // slide "next": + // pre-grow: [nextnextnext] + // post-grow [nextnextnextXXX] + // post-move [XXXnextnextnext] + memmove(next->buf + next_data_offset, next->buf, prev_buf_size); + + // move portion of "start" into "next" + // + // start: [ooooookkkkk] (o: old, k: keep) + // pre-next [xxxxxnextnextnext] + // post-next [kkkkknextnextnext] + const uint32_t start_data_size = start->buf_size - start_data_offset; + memcpy(next->buf, start->buf + start_data_offset, start_data_size); + + // free "start"s buffer, we will use the one from "next" + FREE(cfg, start->buf, start->buf_size); + + // update "main" to use "next" + start->stream_offset = slide_offset; + start->buf = next->buf; + start->buf_size = next->buf_size; + start->next = next->next; + + // free "next" + FREE(cfg, next, sizeof(*next)); + sb->regions--; + BUG_ON(sb->regions == 0); + goto done; + } else { + /* using "main", expand to include "next" */ + if (GrowRegionToSize(sb, cfg, 
start, mem_size) != 0) { + new_mem_size = new_data_size; + goto just_main; + } + SCLogDebug("start->buf now size %u", mem_size); + + // slide "start" + // pre: [xxxxxxxAAA] + // post: [AAAxxxxxxx] + SCLogDebug("s %u new_data_size %u", s, new_data_size); + memmove(start->buf, start->buf + s, new_data_size); + + // copy in "next" + // pre: [AAAxxxxxxx] + // [BBBBBBB] + // post: [AAABBBBBBB] + SCLogDebug("copy next->buf %p/%u to start->buf offset %u", next->buf, + next->buf_size, new_data_size); + memcpy(start->buf + new_data_size, next->buf, next->buf_size); + + start->stream_offset = slide_offset; + start->next = next->next; + + // free "next" + FREE(cfg, next->buf, next->buf_size); + FREE(cfg, next, sizeof(*next)); + sb->regions--; + BUG_ON(sb->regions == 0); + goto done; + } + } + + just_main: SCLogDebug("s %u new_data_size %u", s, new_data_size); memmove(to_shift->buf, to_shift->buf + s, new_data_size); /* shrink memory region. If this fails we keep the old */ @@ -761,6 +947,7 @@ static inline void StreamingBufferSlideToOffsetWithRegions( if (ptr != NULL) { to_shift->buf = ptr; to_shift->buf_size = new_mem_size; + SCLogDebug("new buf_size %u", to_shift->buf_size); } if (s < to_shift->buf_offset) to_shift->buf_offset -= s; @@ -770,6 +957,7 @@ static inline void StreamingBufferSlideToOffsetWithRegions( } } +done: SCLogDebug("main: offset %" PRIu64 " buf %p size %u offset %u", sb->region.stream_offset, sb->region.buf, sb->region.buf_size, sb->region.buf_offset); SCLogDebug("end of slide"); @@ -991,97 +1179,6 @@ static void ListRegions(StreamingBuffer *sb) #endif } -/** \interal - * \brief does data region intersect with list region 'r' - * Takes the max gap into account. 
- */ -static inline bool RegionsIntersect(const StreamingBuffer *sb, const StreamingBufferConfig *cfg, - const StreamingBufferRegion *r, const uint64_t offset, const uint64_t re) -{ - /* create the data range for the region, adding the max gap */ - const uint64_t reg_o = - r->stream_offset > cfg->region_gap ? (r->stream_offset - cfg->region_gap) : 0; - const uint64_t reg_re = r->stream_offset + r->buf_size + cfg->region_gap; - SCLogDebug("r %p: %" PRIu64 "/%" PRIu64 " - adjusted %" PRIu64 "/%" PRIu64, r, r->stream_offset, - r->stream_offset + r->buf_size, reg_o, reg_re); - /* check if data range intersects with region range */ - if (offset >= reg_o && offset <= reg_re) { - SCLogDebug("r %p is in-scope", r); - return true; - } - if (re >= reg_o && re <= reg_re) { - SCLogDebug("r %p is in-scope: %" PRIu64 " >= %" PRIu64 " && %" PRIu64 " <= %" PRIu64, r, re, - reg_o, re, reg_re); - return true; - } - SCLogDebug("r %p is out of scope: %" PRIu64 "/%" PRIu64, r, offset, re); - return false; -} - -/** \internal - * \brief find the first region for merging. 
- */ -static StreamingBufferRegion *FindFirstRegionForOffset(const StreamingBuffer *sb, - const StreamingBufferConfig *cfg, StreamingBufferRegion *r, const uint64_t offset, - const uint32_t len, StreamingBufferRegion **prev) -{ - const uint64_t data_re = offset + len; - SCLogDebug("looking for first region matching %" PRIu64 "/%" PRIu64, offset, data_re); - - StreamingBufferRegion *p = NULL; - for (; r != NULL; r = r->next) { - if (RegionsIntersect(sb, cfg, r, offset, data_re) == true) { - *prev = p; - return r; - } - p = r; - } - *prev = NULL; - return NULL; -} - -static StreamingBufferRegion *FindLargestRegionForOffset(const StreamingBuffer *sb, - const StreamingBufferConfig *cfg, StreamingBufferRegion *r, const uint64_t offset, - const uint32_t len) -{ - const uint64_t data_re = offset + len; - SCLogDebug("starting at %p/%" PRIu64 ", offset %" PRIu64 ", data_re %" PRIu64, r, - r->stream_offset, offset, data_re); - StreamingBufferRegion *candidate = r; - for (; r != NULL; r = r->next) { -#ifdef DEBUG - const uint64_t reg_re = r->stream_offset + r->buf_size; - SCLogDebug("checking: %p/%" PRIu64 "/%" PRIu64 ", offset %" PRIu64 "/%" PRIu64, r, - r->stream_offset, reg_re, offset, data_re); -#endif - if (!RegionsIntersect(sb, cfg, r, offset, data_re)) - return candidate; - - if (r->buf_size > candidate->buf_size) { - SCLogDebug("candidate %p as size %u > %u", candidate, r->buf_size, candidate->buf_size); - candidate = r; - } - } - return candidate; -} - -static StreamingBufferRegion *FindRightEdge(const StreamingBuffer *sb, - const StreamingBufferConfig *cfg, StreamingBufferRegion *r, const uint64_t offset, - const uint32_t len) -{ - const uint64_t data_re = offset + len; - StreamingBufferRegion *candidate = r; - for (; r != NULL; r = r->next) { - if (!RegionsIntersect(sb, cfg, r, offset, data_re)) { - SCLogDebug( - "r %p is out of scope: %" PRIu64 "/%u/%" PRIu64, r, offset, len, offset + len); - return candidate; - } - candidate = r; - } - return candidate; -} - /** 
\internal * \brief process insert by consolidating the affected regions into one */ @@ -1119,7 +1216,11 @@ static StreamingBufferRegion *BufferInsertAtRegionConsolidate(StreamingBuffer *s DEBUG_VALIDATE_BUG_ON(dst_size != dst->buf_size); if (dst_copy_offset != 0) memmove(dst->buf + dst_copy_offset, dst->buf, old_size); - dst->stream_offset = dst_offset; + if (dst_offset != dst->stream_offset) { + dst->stream_offset = dst_offset; + // buf_offset no longer valid, reset. + dst->buf_offset = 0; + } uint32_t new_offset = src_start->buf_offset; if (data_offset == src_start->stream_offset + src_start->buf_offset) { @@ -1360,7 +1461,7 @@ int StreamingBufferInsertAt(StreamingBuffer *sb, const StreamingBufferConfig *cf seg->segment_len = data_len; SCLogDebug("rel_offset %u region->stream_offset %" PRIu64 ", buf_offset %u", rel_offset, - region->stream_offset, sb->region.buf_offset); + region->stream_offset, region->buf_offset); if (RB_EMPTY(&sb->sbb_tree)) { SCLogDebug("empty sbb list");