Skip to content
Merged
31 changes: 28 additions & 3 deletions .github/workflows/builds.yml
Original file line number Diff line number Diff line change
@@ -1,8 +1,16 @@
name: builds

on:
- push
- pull_request
push:
pull_request:
workflow_dispatch:
inputs:
LIBHTP_REPO:
LIBHTP_BRANCH:
SU_REPO:
SU_BRANCH:
SV_REPO:
SV_BRANCH:

permissions: read-all

Expand Down Expand Up @@ -56,7 +64,24 @@ jobs:
SV_REPO=$(echo "${body}" | awk -F = '/^SV_REPO=/ { print $2 }')
SV_BRANCH=$(echo "${body}" | awk -F = '/^SV_BRANCH=/ { print $2 }')
else
echo "No pull request body, will use defaults."
echo "No pull request body, will use inputs or defaults."
LIBHTP_REPO=${{ inputs.LIBHTP_REPO }}
LIBHTP_BRANCH=${{ inputs.LIBHTP_BRANCH }}
SU_REPO=${{ inputs.SU_REPO }}
SU_BRANCH=${{ inputs.SU_BRANCH }}
SV_REPO=${{ inputs.SV_REPO }}
SV_BRANCH=${{ inputs.SV_BRANCH }}
fi

# If the _REPO variables don't contain a full URL, add GitHub.
if [ "${LIBHTP_REPO}" ] && ! echo "${LIBHTP_REPO}" | grep -q '^https://'; then
LIBHTP_REPO="https://github.com/${LIBHTP_REPO}"
fi
if [ "${SU_REPO}" ] && ! echo "${SU_REPO}" | grep -q '^https://'; then
SU_REPO="https://github.com/${SU_REPO}"
fi
if [ "${SV_REPO}" ] && ! echo "${SV_REPO}" | grep -q '^https://'; then
SV_REPO="https://github.com/${SV_REPO}"
fi

echo LIBHTP_REPO=${LIBHTP_REPO} | tee -a ${GITHUB_ENV}
Expand Down
4 changes: 2 additions & 2 deletions doc/userguide/output/eve/eve-json-format.rst
Original file line number Diff line number Diff line change
Expand Up @@ -286,7 +286,7 @@ suricata.yaml file the following fields are (can) also included:
* "status": HTTP status code
* "protocol": Protocol / Version of HTTP (ex: HTTP/1.1)
* "http_method": The HTTP method (ex: GET, POST, HEAD)
* "http_refer": The referrer for this action
* "http_refer": The referer for this action

In addition to the extended logging fields one can also choose to enable/add
from more than 50 additional custom logging HTTP fields enabled in the
Expand Down Expand Up @@ -318,7 +318,7 @@ suricata.yaml file. The additional fields can be enabled as following:
allow, connection, content-encoding, content-language,
content-length, content-location, content-md5, content-range,
content-type, date, etags, expires, last-modified, link, location,
proxy-authenticate, referrer, refresh, retry-after, server,
proxy-authenticate, referer, refresh, retry-after, server,
set-cookie, trailer, transfer-encoding, upgrade, vary, warning,
www-authenticate, x-flash-version, x-authenticated-user]

Expand Down
2 changes: 1 addition & 1 deletion doc/userguide/output/eve/eve-json-output.rst
Original file line number Diff line number Diff line change
Expand Up @@ -192,7 +192,7 @@ last_modified last-modified
link link
location location
proxy_authenticate proxy-authenticate
referrer referrer
referer referer
refresh refresh
retry_after retry-after
server server
Expand Down
2 changes: 1 addition & 1 deletion src/detect-engine-alert.c
Original file line number Diff line number Diff line change
Expand Up @@ -334,7 +334,7 @@ static inline void FlowApplySignatureActions(
* - sig is IP or PD only
* - match is in applayer
* - match is in stream */
if (s->action & (ACTION_DROP | ACTION_PASS)) {
if (pa->action & (ACTION_DROP | ACTION_PASS)) {
DEBUG_VALIDATE_BUG_ON(s->type == SIG_TYPE_NOT_SET);
DEBUG_VALIDATE_BUG_ON(s->type == SIG_TYPE_MAX);

Expand Down
6 changes: 4 additions & 2 deletions src/detect.c
Original file line number Diff line number Diff line change
Expand Up @@ -1709,9 +1709,11 @@ static void DetectFlow(ThreadVars *tv,
return;
}

/* if flow is set to drop, we enforce that here */
/* we check the flow drop here, and not the packet drop. This is
* to allow stream engine "invalid" drop packets to still be
* evaluated by the stream event rules. */
if (f->flags & FLOW_ACTION_DROP) {
PacketDrop(p, ACTION_DROP, PKT_DROP_REASON_FLOW_DROP);
DEBUG_VALIDATE_BUG_ON(!(PKT_IS_PSEUDOPKT(p)) && !PacketCheckAction(p, ACTION_DROP));
SCReturn;
}

Expand Down
3 changes: 2 additions & 1 deletion src/flow-manager.c
Original file line number Diff line number Diff line change
Expand Up @@ -281,7 +281,8 @@ static uint32_t ProcessAsideQueue(FlowManagerTimeoutThread *td, FlowTimeoutCount
while ((f = FlowQueuePrivateGetFromTop(&td->aside_queue)) != NULL) {
/* flow is still locked */

if (f->proto == IPPROTO_TCP && !(f->flags & FLOW_TIMEOUT_REASSEMBLY_DONE) &&
if (f->proto == IPPROTO_TCP &&
!(f->flags & (FLOW_TIMEOUT_REASSEMBLY_DONE | FLOW_ACTION_DROP)) &&
!FlowIsBypassed(f) && FlowForceReassemblyNeedReassembly(f) == 1) {
/* Send the flow to its thread */
FlowForceReassemblyForFlow(f);
Expand Down
43 changes: 24 additions & 19 deletions src/flow-worker.c
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,8 @@
#include "suricata-common.h"
#include "suricata.h"

#include "action-globals.h"
#include "packet.h"
#include "decode.h"
#include "detect.h"
#include "stream-tcp.h"
Expand Down Expand Up @@ -183,8 +185,9 @@ static void CheckWorkQueue(ThreadVars *tv, FlowWorkerThreadData *fw, FlowTimeout
f->flow_end_flags |= FLOW_END_FLAG_TIMEOUT; //TODO emerg

if (f->proto == IPPROTO_TCP) {
if (!(f->flags & FLOW_TIMEOUT_REASSEMBLY_DONE) && !FlowIsBypassed(f) &&
FlowForceReassemblyNeedReassembly(f) == 1 && f->ffr != 0) {
if (!(f->flags & (FLOW_TIMEOUT_REASSEMBLY_DONE | FLOW_ACTION_DROP)) &&
!FlowIsBypassed(f) && FlowForceReassemblyNeedReassembly(f) == 1 &&
f->ffr != 0) {
/* read detect thread in case we're doing a reload */
void *detect_thread = SC_ATOMIC_GET(fw->detect_thread);
int cnt = FlowFinish(tv, f, fw, detect_thread);
Expand Down Expand Up @@ -549,26 +552,28 @@ static TmEcode FlowWorker(ThreadVars *tv, Packet *p, void *data)
SCLogDebug("packet %"PRIu64" has flow? %s", p->pcap_cnt, p->flow ? "yes" : "no");

/* handle TCP and app layer */
if (p->flow && PKT_IS_TCP(p)) {
SCLogDebug("packet %"PRIu64" is TCP. Direction %s", p->pcap_cnt, PKT_IS_TOSERVER(p) ? "TOSERVER" : "TOCLIENT");
DEBUG_ASSERT_FLOW_LOCKED(p->flow);
if (p->flow) {
if (PKT_IS_TCP(p)) {
SCLogDebug("packet %" PRIu64 " is TCP. Direction %s", p->pcap_cnt,
PKT_IS_TOSERVER(p) ? "TOSERVER" : "TOCLIENT");
DEBUG_ASSERT_FLOW_LOCKED(p->flow);

/* if detect is disabled, we need to apply file flags to the flow
* here on the first packet. */
if (detect_thread == NULL &&
((PKT_IS_TOSERVER(p) && (p->flowflags & FLOW_PKT_TOSERVER_FIRST)) ||
(PKT_IS_TOCLIENT(p) && (p->flowflags & FLOW_PKT_TOCLIENT_FIRST))))
{
DisableDetectFlowFileFlags(p->flow);
}
/* if detect is disabled, we need to apply file flags to the flow
* here on the first packet. */
if (detect_thread == NULL &&
((PKT_IS_TOSERVER(p) && (p->flowflags & FLOW_PKT_TOSERVER_FIRST)) ||
(PKT_IS_TOCLIENT(p) && (p->flowflags & FLOW_PKT_TOCLIENT_FIRST)))) {
DisableDetectFlowFileFlags(p->flow);
}

FlowWorkerStreamTCPUpdate(tv, fw, p, detect_thread, false);
FlowWorkerStreamTCPUpdate(tv, fw, p, detect_thread, false);

/* handle the app layer part of the UDP packet payload */
} else if (p->flow && p->proto == IPPROTO_UDP) {
FLOWWORKER_PROFILING_START(p, PROFILE_FLOWWORKER_APPLAYERUDP);
AppLayerHandleUdp(tv, fw->stream_thread->ra_ctx->app_tctx, p, p->flow);
FLOWWORKER_PROFILING_END(p, PROFILE_FLOWWORKER_APPLAYERUDP);
/* handle the app layer part of the UDP packet payload */
} else if (p->proto == IPPROTO_UDP && !PacketCheckAction(p, ACTION_DROP)) {
FLOWWORKER_PROFILING_START(p, PROFILE_FLOWWORKER_APPLAYERUDP);
AppLayerHandleUdp(tv, fw->stream_thread->ra_ctx->app_tctx, p, p->flow);
FLOWWORKER_PROFILING_END(p, PROFILE_FLOWWORKER_APPLAYERUDP);
}
}

PacketUpdateEngineEventCounters(tv, fw->dtv, p);
Expand Down
6 changes: 6 additions & 0 deletions src/flow.c
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,9 @@

#include "suricata-common.h"
#include "suricata.h"

#include "action-globals.h"
#include "packet.h"
#include "decode.h"
#include "conf.h"
#include "threadvars.h"
Expand Down Expand Up @@ -505,6 +508,9 @@ void FlowHandlePacketUpdate(Flow *f, Packet *p, ThreadVars *tv, DecodeThreadVars
FlowUpdateState(f, FLOW_STATE_ESTABLISHED);
}

if (f->flags & FLOW_ACTION_DROP) {
PacketDrop(p, ACTION_DROP, PKT_DROP_REASON_FLOW_DROP);
}
/*set the detection bypass flags*/
if (f->flags & FLOW_NOPACKET_INSPECTION) {
SCLogDebug("setting FLOW_NOPACKET_INSPECTION flag on flow %p", f);
Expand Down
121 changes: 55 additions & 66 deletions src/output-json-http.c
Original file line number Diff line number Diff line change
Expand Up @@ -138,15 +138,15 @@ struct {
const char *config_field;
const char *htp_field;
uint32_t flags;
} http_fields[] = {
} http_fields[] = {
{ "accept", "accept", LOG_HTTP_REQUEST },
{ "accept_charset", "accept-charset", LOG_HTTP_REQUEST },
{ "accept_encoding", "accept-encoding", LOG_HTTP_REQUEST },
{ "accept_language", "accept-language", LOG_HTTP_REQUEST },
{ "accept_datetime", "accept-datetime", LOG_HTTP_REQUEST },
{ "authorization", "authorization", LOG_HTTP_REQUEST },
{ "cache_control", "cache-control", LOG_HTTP_REQUEST },
{ "cookie", "cookie", LOG_HTTP_REQUEST|LOG_HTTP_ARRAY },
{ "cookie", "cookie", LOG_HTTP_REQUEST | LOG_HTTP_ARRAY },
{ "from", "from", LOG_HTTP_REQUEST },
{ "max_forwards", "max-forwards", LOG_HTTP_REQUEST },
{ "origin", "origin", LOG_HTTP_REQUEST },
Expand All @@ -173,12 +173,12 @@ struct {
{ "content_type", "content-type", 0 },
{ "date", "date", 0 },
{ "etag", "etags", 0 },
{ "expires", "expires" , 0 },
{ "expires", "expires", 0 },
{ "last_modified", "last-modified", 0 },
{ "link", "link", 0 },
{ "location", "location", 0 },
{ "proxy_authenticate", "proxy-authenticate", 0 },
{ "referrer", "referrer", LOG_HTTP_EXTENDED },
{ "referer", "referer", LOG_HTTP_EXTENDED },
{ "refresh", "refresh", 0 },
{ "retry_after", "retry-after", 0 },
{ "server", "server", 0 },
Expand Down Expand Up @@ -264,46 +264,6 @@ static void EveHttpLogJSONBasic(JsonBuilder *js, htp_tx_t *tx)
}
}

/* Log the user-configured custom HTTP header fields for one transaction.
 *
 * Walks every known field id; for each one whose bit is set in the
 * http_ctx->fields bitmask, looks the header up by name in the request or
 * response header table (direction chosen by the LOG_HTTP_REQUEST flag in
 * http_fields[]) and, if present, emits its value as a JSON string under
 * the field's configured key.
 *
 * \param http_ctx  output context holding the enabled-field bitmask and flags
 * \param js        JSON builder the header values are appended to
 * \param tx        HTTP transaction whose header tables are inspected
 */
static void EveHttpLogJSONCustom(LogHttpFileCtx *http_ctx, JsonBuilder *js, htp_tx_t *tx)
{
char *c;
HttpField f;

for (f = HTTP_FIELD_ACCEPT; f < HTTP_FIELD_SIZE; f++)
{
/* only consider fields enabled in the configuration bitmask */
if ((http_ctx->fields & (1ULL<<f)) != 0)
{
/* prevent logging a field twice if extended logging is
enabled */
if (((http_ctx->flags & LOG_HTTP_EXTENDED) == 0) ||
((http_ctx->flags & LOG_HTTP_EXTENDED) !=
(http_fields[f].flags & LOG_HTTP_EXTENDED)))
{
htp_header_t *h_field = NULL;
/* pick the header table for this field's direction; either
 * table may be NULL if that side of the tx was not parsed */
if ((http_fields[f].flags & LOG_HTTP_REQUEST) != 0)
{
if (tx->request_headers != NULL) {
h_field = htp_table_get_c(tx->request_headers,
http_fields[f].htp_field);
}
} else {
if (tx->response_headers != NULL) {
h_field = htp_table_get_c(tx->response_headers,
http_fields[f].htp_field);
}
}
if (h_field != NULL) {
/* bstr value -> heap C string; NULL on allocation failure,
 * in which case the field is silently skipped */
c = bstr_util_strdup_to_c(h_field->value);
if (c != NULL) {
jb_set_string(js, http_fields[f].config_field, c);
SCFree(c);
}
}
}
}
}
}

static void EveHttpLogJSONExtended(JsonBuilder *js, htp_tx_t *tx)
{
/* referer */
Expand Down Expand Up @@ -348,19 +308,44 @@ static void EveHttpLogJSONExtended(JsonBuilder *js, htp_tx_t *tx)
jb_set_uint(js, "length", tx->response_message_len);
}

static void EveHttpLogJSONHeaders(JsonBuilder *js, uint32_t direction, htp_tx_t *tx)
static void EveHttpLogJSONHeaders(
JsonBuilder *js, uint32_t direction, htp_tx_t *tx, LogHttpFileCtx *http_ctx)
{
htp_table_t * headers = direction & LOG_HTTP_REQ_HEADERS ?
tx->request_headers : tx->response_headers;
char name[MAX_SIZE_HEADER_NAME] = {0};
char value[MAX_SIZE_HEADER_VALUE] = {0};
size_t n = htp_table_size(headers);
JsonBuilderMark mark = { 0, 0, 0 };
jb_get_mark(js, &mark);
bool array_empty = true;
jb_open_array(js, direction & LOG_HTTP_REQ_HEADERS ? "request_headers" : "response_headers");
for (size_t i = 0; i < n; i++) {
htp_header_t * h = htp_table_get_index(headers, i, NULL);
if (h == NULL) {
continue;
}
if ((http_ctx->flags & direction) == 0 && http_ctx->fields != 0) {
bool tolog = false;
for (HttpField f = HTTP_FIELD_ACCEPT; f < HTTP_FIELD_SIZE; f++) {
if ((http_ctx->fields & (1ULL << f)) != 0) {
/* prevent logging a field twice if extended logging is
enabled */
if (((http_ctx->flags & LOG_HTTP_EXTENDED) == 0) ||
((http_ctx->flags & LOG_HTTP_EXTENDED) !=
(http_fields[f].flags & LOG_HTTP_EXTENDED))) {
if (bstr_cmp_c_nocase(h->name, http_fields[f].htp_field) == 0) {
tolog = true;
break;
}
}
}
}
if (!tolog) {
continue;
}
}
array_empty = false;
jb_start_object(js);
size_t size_name = bstr_len(h->name) < MAX_SIZE_HEADER_NAME - 1 ?
bstr_len(h->name) : MAX_SIZE_HEADER_NAME - 1;
Expand All @@ -374,8 +359,12 @@ static void EveHttpLogJSONHeaders(JsonBuilder *js, uint32_t direction, htp_tx_t
jb_set_string(js, "value", value);
jb_close(js);
}
// Close array.
jb_close(js);
if (array_empty) {
jb_restore_mark(js, &mark);
} else {
// Close array.
jb_close(js);
}
}

static void BodyPrintableBuffer(JsonBuilder *js, HtpBody *body, const char *key)
Expand Down Expand Up @@ -454,15 +443,12 @@ static void EveHttpLogJSON(JsonHttpLogThread *aft, JsonBuilder *js, htp_tx_t *tx
jb_open_object(js, "http");

EveHttpLogJSONBasic(js, tx);
/* log custom fields if configured */
if (http_ctx->fields != 0)
EveHttpLogJSONCustom(http_ctx, js, tx);
if (http_ctx->flags & LOG_HTTP_EXTENDED)
EveHttpLogJSONExtended(js, tx);
if (http_ctx->flags & LOG_HTTP_REQ_HEADERS)
EveHttpLogJSONHeaders(js, LOG_HTTP_REQ_HEADERS, tx);
if (http_ctx->flags & LOG_HTTP_RES_HEADERS)
EveHttpLogJSONHeaders(js, LOG_HTTP_RES_HEADERS, tx);
if (http_ctx->flags & LOG_HTTP_REQ_HEADERS || http_ctx->fields != 0)
EveHttpLogJSONHeaders(js, LOG_HTTP_REQ_HEADERS, tx, http_ctx);
if (http_ctx->flags & LOG_HTTP_RES_HEADERS || http_ctx->fields != 0)
EveHttpLogJSONHeaders(js, LOG_HTTP_RES_HEADERS, tx, http_ctx);

jb_close(js);
}
Expand Down Expand Up @@ -566,8 +552,23 @@ static OutputInitResult OutputHttpLogInitSub(ConfNode *conf, OutputCtx *parent_c
}
}

const char *all_headers = ConfNodeLookupChildValue(conf, "dump-all-headers");
if (all_headers != NULL) {
if (strncmp(all_headers, "both", 4) == 0) {
http_ctx->flags |= LOG_HTTP_REQ_HEADERS;
http_ctx->flags |= LOG_HTTP_RES_HEADERS;
} else if (strncmp(all_headers, "request", 7) == 0) {
http_ctx->flags |= LOG_HTTP_REQ_HEADERS;
} else if (strncmp(all_headers, "response", 8) == 0) {
http_ctx->flags |= LOG_HTTP_RES_HEADERS;
}
}
ConfNode *custom;
if ((custom = ConfNodeLookupChild(conf, "custom")) != NULL) {
if ((http_ctx->flags & (LOG_HTTP_REQ_HEADERS | LOG_HTTP_RES_HEADERS)) ==
(LOG_HTTP_REQ_HEADERS | LOG_HTTP_RES_HEADERS)) {
SCLogWarning("No need for custom as dump-all-headers is already present");
}
ConfNode *field;
TAILQ_FOREACH (field, &custom->head, next) {
HttpField f;
Expand All @@ -580,18 +581,6 @@ static OutputInitResult OutputHttpLogInitSub(ConfNode *conf, OutputCtx *parent_c
}
}
}
const char *all_headers = ConfNodeLookupChildValue(
conf, "dump-all-headers");
if (all_headers != NULL) {
if (strncmp(all_headers, "both", 4) == 0) {
http_ctx->flags |= LOG_HTTP_REQ_HEADERS;
http_ctx->flags |= LOG_HTTP_RES_HEADERS;
} else if (strncmp(all_headers, "request", 7) == 0) {
http_ctx->flags |= LOG_HTTP_REQ_HEADERS;
} else if (strncmp(all_headers, "response", 8) == 0) {
http_ctx->flags |= LOG_HTTP_RES_HEADERS;
}
}
}

if (conf != NULL && ConfNodeLookupChild(conf, "xff") != NULL) {
Expand Down
Loading