From 2c9c0a6ef45d851c016ae929afde8e275bebdb67 Mon Sep 17 00:00:00 2001 From: Louis Poinsignon Date: Tue, 6 Aug 2019 17:02:53 -0700 Subject: [PATCH 1/2] Version 3 --- .travis.yml | 15 + Dockerfile | 17 +- Gopkg.toml | 46 -- Makefile | 48 +- README.md | 138 ++-- cmd/cnetflow/cnetflow.go | 86 +++ cmd/cnflegacy/cnflegacy.go | 84 +++ cmd/csflow/csflow.go | 84 +++ cmd/goflow/goflow.go | 147 +++++ decoders/decoder.go | 1 - decoders/netflow/ipfix.go | 2 +- decoders/netflow/nfv9.go | 2 +- decoders/netflowlegacy/netflow.go | 52 ++ decoders/netflowlegacy/netflow_test.go | 40 ++ decoders/netflowlegacy/packet.go | 96 +++ decoders/sflow/sflow.go | 133 +++- decoders/sflow/sflow_test.go | 27 + go.mod | 12 + go.sum | 67 ++ goflow.go | 840 ------------------------- pb/flow.pb.go | 380 ++++++----- pb/flow.proto | 105 ++-- producer/producer_nf.go | 50 +- producer/producer_nflegacy.go | 78 +++ producer/producer_sf.go | 27 +- producer/producer_test.go | 77 +++ transport/kafka.go | 120 +++- transport/transport_test.go | 15 + metrics.go => utils/metrics.go | 20 +- utils/netflow.go | 349 ++++++++++ utils/nflegacy.go | 76 +++ utils/sflow.go | 135 ++++ utils/utils.go | 217 +++++++ 33 files changed, 2280 insertions(+), 1306 deletions(-) create mode 100644 .travis.yml delete mode 100644 Gopkg.toml create mode 100644 cmd/cnetflow/cnetflow.go create mode 100644 cmd/cnflegacy/cnflegacy.go create mode 100644 cmd/csflow/csflow.go create mode 100644 cmd/goflow/goflow.go create mode 100644 decoders/netflowlegacy/netflow.go create mode 100644 decoders/netflowlegacy/netflow_test.go create mode 100644 decoders/netflowlegacy/packet.go create mode 100644 decoders/sflow/sflow_test.go create mode 100644 go.mod create mode 100644 go.sum delete mode 100644 goflow.go create mode 100644 producer/producer_nflegacy.go create mode 100644 producer/producer_test.go create mode 100644 transport/transport_test.go rename metrics.go => utils/metrics.go (93%) create mode 100644 utils/netflow.go create mode 100644 
utils/nflegacy.go create mode 100644 utils/sflow.go create mode 100644 utils/utils.go diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..6db423e --- /dev/null +++ b/.travis.yml @@ -0,0 +1,15 @@ +language: go +go_import_path: github.com/cloudflare/goflow +go: + - 1.12.x + +script: + - GO111MODULE=on make + +notifications: + email: + recipients: + - louis@cloudflare.com + on_success: never + on_failure: change + diff --git a/Dockerfile b/Dockerfile index 9171642..e1718b8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,17 +1,12 @@ -ARG src_dir="/go/src/github.com/cloudflare/goflow" - FROM golang:alpine as builder -ARG src_dir +ARG VERSION="" -RUN apk --update --no-cache add git && \ - mkdir -p ${src_dir} +RUN apk --update --no-cache add git build-base gcc -WORKDIR ${src_dir} -COPY . . +COPY . /build +WORKDIR /build -RUN go get -u github.com/golang/dep/cmd/dep && \ - dep ensure && \ - go build +RUN go build -ldflags "-X main.version=${VERSION}" -o goflow cmd/goflow/goflow.go FROM alpine:latest ARG src_dir @@ -19,6 +14,6 @@ ARG src_dir RUN apk update --no-cache && \ adduser -S -D -H -h / flow USER flow -COPY --from=builder ${src_dir}/goflow / +COPY --from=builder /build/goflow / ENTRYPOINT ["./goflow"] diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index b7f46a1..0000000 --- a/Gopkg.toml +++ /dev/null @@ -1,46 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - - -[[constraint]] - name = "github.com/Sirupsen/logrus" - version = "1.1.0" - -[[constraint]] - name = "github.com/golang/protobuf" - version = "1.2.0" - -[[constraint]] - name = "github.com/prometheus/client_golang" - version = "0.8.0" - -[[constraint]] - name = "github.com/Shopify/sarama" - version = "1.19.0" - -[prune] - go-tests = true - unused-packages = true diff --git a/Makefile b/Makefile index 866b0ff..984b28f 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,48 @@ +IMAGE ?= cloudflare/goflow +VERSION ?= $(shell git describe --tags --always --dirty) +VERSION_DOCKER ?= $(shell git describe --tags --abbrev=0 --always --dirty) + +GOOS ?= linux +ARCH ?= $(shell uname -m) + +.PHONY: all +all: test-race vet test + +.PHONY: clean +clean: + rm -rf bin + +.PHONY: build +build: + @echo compiling code + mkdir bin + GOOS=$(GOOS) go build -ldflags '-X main.version=$(VERSION)' -o bin/goflow-$(GOOS)-$(ARCH) cmd/goflow/goflow.go + GOOS=$(GOOS) go build -ldflags '-X main.version=$(VERSION)' -o bin/goflow-sflow-$(GOOS)-$(ARCH) cmd/csflow/csflow.go + GOOS=$(GOOS) go build -ldflags '-X main.version=$(VERSION)' -o bin/goflow-netflow-$(GOOS)-$(ARCH) cmd/cnetflow/cnetflow.go + GOOS=$(GOOS) go build -ldflags '-X main.version=$(VERSION)' -o bin/goflow-nflegacy-$(GOOS)-$(ARCH) cmd/cnflegacy/cnflegacy.go + + +.PHONY: container +container: + @echo build docker container + docker build --build-arg VERSION=$(VERSION) -t $(IMAGE):$(VERSION_DOCKER) . 
+ .PHONY: proto +proto: + @echo generating protobuf + protoc --go_out=. --plugin=$(PROTOCPATH)protoc-gen-go pb/*.proto + +.PHONY: test +test: + @echo testing code + go test ./... + +.PHONY: vet +vet: + @echo checking code is vetted + go vet $(shell go list ./...) -proto: - protoc $$PROTO_PATH --go_out=. pb/flow.proto +.PHONY: test-race +test-race: + @echo testing code for races + go test -race ./... diff --git a/README.md b/README.md index 35c478d..6b4dbec 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,12 @@ which contains the fields a network engineer is interested in. The flow packets usually contains multiples samples This acts as an abstraction of a sample. +The `transport` provides different way of processing the protobuf. Either sending it via Kafka or +print it on the console. + +Finally, `utils` provide functions that are directly used by the CLI utils. GoFlow is a wrapper of all the functions and chains thems into producing bytes into Kafka. +There is also one CLI tool per protocol. You can build your own collector using this base and replace parts: * Use different transport (eg: RabbitMQ instead of Kafka) @@ -29,8 +34,6 @@ You can build your own collector using this base and replace parts: * Decode different samples (eg: not only IP networks, add MPLS) * Different metrics system (eg: use [expvar](https://golang.org/pkg/expvar/) instead of Prometheus) -Starting on v2.0.0: you have an increased flexibility and less interdependence in the code. 
- ### Protocol difference The sampling protocols can be very different: @@ -50,6 +53,7 @@ protocols (eg: per ASN or per port, rather than per (ASN, router) and (port, rou ## Features Collection: +* NetFlow v5 * IPFIX/NetFlow v9 * Handles sampling rate provided by the Option Data Set * sFlow v5: RAW, IPv4, IPv6, Ethernet samples, Gateway data, router data, switch data @@ -57,6 +61,7 @@ Collection: Production: * Convert to protobuf * Sends to Kafka producer +* Prints to the console Monitoring: * Prometheus metrics @@ -73,24 +78,24 @@ Download the latest release and just run the following command: ./goflow -h ``` -Enable or disable a protocol using `-netflow=false` or `-sflow=false`. -Define the port and addresses of the protocols using `-faddr`, `-fport` for NetFlow and `-saddr`, `-sport` for sFlow. - -Set the `-loglevel` to `debug` mode to see what is received. +Enable or disable a protocol using `-nf=false` or `-sflow=false`. +Define the port and addresses of the protocols using `-nf.addr`, `-nf.port` for NetFlow and `-sflow.addr`, `-slow.port` for sFlow. Set the brokers or the Kafka brokers SRV record using: `-kafka.out.brokers 127.0.0.1:9092,[::1]:9092` or `-kafka.out.srv`. Disable Kafka sending `-kafka=false`. +You can hash the protobuf by key when you send it to Kafka. -You can collect NetFlow/IPFIX and sFlow using the same. +You can collect NetFlow/IPFIX, NetFlow v5 and sFlow using the same collector +or use the single-protocol collectors. -You can define the number of workers per protocol using `-fworkers` and `-sworkers`. +You can define the number of workers per protocol using `-workers` . ## Docker We also provide a all-in-one Docker container. 
To run it in debug mode without sending into Kafka: ``` -$ sudo docker run --net=host -ti cloudflare/goflow:latest -kafka=false -loglevel debug +$ sudo docker run --net=host -ti cloudflare/goflow:latest -kafka=false ``` ## Environment @@ -109,7 +114,6 @@ is preserved when adding new fields (thus the fields will be lost if re-serializ Once the updated flows are back into Kafka, they are **consumed** by **database inserters** (Clickhouse, Amazon Redshift, Google BigTable...) to allow for static analysis. Other teams access the network data just like any other log (SQL query). -They are also consumed by a Flink cluster in order to be **aggregated** and give live traffic information. ### Output format @@ -117,9 +121,7 @@ If you want to develop applications, build `pb/flow.proto` into the language you Example in Go: ``` -export SRC_DIR="path/to/goflow-pb" -protoc --proto_path=$SRC_DIR --plugin=/path/to/bin/protoc-gen-go $SRC_DIR/flow.proto --go_out=$SRC_DIR - +PROTOCPATH=$HOME/go/bin/ make proto ``` Example in Java: @@ -128,65 +130,79 @@ Example in Java: export SRC_DIR="path/to/goflow-pb" export DST_DIR="path/to/java/app/src/main/java" protoc -I=$SRC_DIR --java_out=$DST_DIR $SRC_DIR/flow.proto - ``` -The format is the following: - -| Field | Description | -| ----- | ----------- | -| FlowType | Indicates the protocol (IPFIX, NetFlow v9, sFlow v5) | -| TimeRecvd | Timestamp the packet was received by the collector | -| TimeFlow | Timestamp of the packet (same as TimeRecvd in sFlow, in NetFlow it's the uptime of the router minus LAST_SWITCHED field, in IPFIX it's flowEnd* field), meant to be replaced by TimeFlowEnd | -| SamplingRate | Sampling rate of the flow, used to extrapolate the number of bytes and packets | -| SequenceNum | Sequence number of the packet | -| SrcIP | Source IP (sequence of bytes, can be IPv4 or IPv6) | -| DstIP | Destination IP (sequence of bytes, can be IPv4 or IPv6) | -| IPType | Indicates if IPv4 or IPv6), meant to be replaced by Etype | -| Bytes 
| Number of bytes in the sample | -| Packets | Number of packets in the sample | -| RouterAddr | Address of the router (UDP source in NetFlow/IPFIX, Agent IP in sFlow) | -| NextHop | Next-hop IP | -| NextHopAS | Next-hop ASN when the next-hop is a BGP neighbor (not all the flows) | -| SrcAS | Source ASN (provided by BGP) | -| DstAS | Destination ASN (provided by BGP) | -| SrcNet | Network mask of the source IP (provided by BGP) | -| DstNet | Network mask of the destination IP (provided by BGP) | -| SrcIf | Source interface ID (SNMP id) | -| DstIf | Destination interface ID (SNMP id) | -| Proto | Protocol code: TCP, UDP, etc. | -| SrcPort | Source port when proto is UDP/TCP | -| DstPort | Destination port when proto is UDP/TCP | -| IPTos | IPv4 type of service / Traffic class in IPv6 | -| ForwardingStatus | If the packet has been [dropped, consumed or forwarded](https://www.iana.org/assignments/ipfix/ipfix.xhtml#forwarding-status) | -| IPTTL | Time to Live of the IP packet | -| TCPFlags | Flags of the TCP Packet (SYN, ACK, etc.) | -| SrcMac | Source Mac Address | -| DstMac | Destination Mac Address | -| VlanId | Vlan when 802.1q | -| Etype | Ethernet type (IPv4, IPv6, ARP, etc.) | -| IcmpType | ICMP Type | -| IcmpCode | ICMP Code | -| SrcVlan | Source VLAN | -| DstVlan | Destination VLAN | -| FragmentId | IP Fragment Identifier | -| FragmentOffset | IP Fragment Offset | -| IPv6FlowLabel | IPv6 Flow Label | -| TimeFlowStart | Start Timestamp of the flow (this field is empty for sFlow, in NetFlow it's the uptime of the router minus FIRST_SWITCHED field, in IPFIX it's flowStart* field) | -| TimeFlowEnd | End Timestamp of the flow (same as TimeRecvd in sFlow, in NetFlow it's the uptime of the router minus LAST_SWITCHED field, in IPFIX it's flowEnd* field) | -| IngressVrfId | Ingress VRF ID | -| EgressVrfId | Egress VRF ID | +The fields are listed in the following table. 
+ +You can find information on how they are populated from the original source: +* For [sFlow](https://sflow.org/developers/specifications.php) +* For [NetFlow v5](https://www.cisco.com/c/en/us/td/docs/net_mgmt/netflow_collection_engine/3-6/user/guide/format.html) +* For [NetFlow v9](https://www.cisco.com/en/US/technologies/tk648/tk362/technologies_white_paper09186a00800a3db9.html) +* For [IPFIX](https://www.iana.org/assignments/ipfix/ipfix.xhtml) + +| Field | Description | NetFlow v5 | sFlow | NetFlow v9 | IPFIX | +| - | - | - | - | - | - | +|Type|Type of flow message|NETFLOW_V5|SFLOW_5|NETFLOW_V9|IPFIX| +|TimeReceived|Timestamp of when the message was received|Included|Included|Included|Included| +|SequenceNum|Sequence number of the flow packet|Included|Included|Included|Included| +|SamplingRate|Sampling rate of the flow|Included|Included|Included|Included| +|FlowDirection|Direction of the flow| | |DIRECTION (61)|flowDirection (61)| +|SamplerAddress|Address of the device that generated the packet|IP source of packet|Agent IP|IP source of packet|IP source of packet| +|TimeFlowStart|Time the flow started|System uptime and first|=TimeReceived|System uptime and FIRST_SWITCHED (22)|flowStartXXX (150, 152, 154, 156)| +|TimeFlowEnd|Time the flow ended|System uptime and last|=TimeReceived|System uptime and LAST_SWITCHED (23)|flowEndXXX (151, 153, 155, 157)| +|Bytes|Number of bytes in flow|dOctets|Length of sample|IN_BYTES (1) OUT_BYTES (23)|octetDeltaCount (1) postOctetDeltaCount (23)| +|Packets|Number of packets in flow|dPkts|=1|IN_PKTS (2) OUT_PKTS (24)|packetDeltaCount (1) postPacketDeltaCount (24)| +|SrcAddr|Source address (IP)|srcaddr (IPv4 only)|Included|Included|IPV4_SRC_ADDR (8) IPV6_SRC_ADDR (27)|sourceIPv4Address/sourceIPv6Address (8/27)| +|DstAddr|Destination address (IP)|dstaddr (IPv4 only)|Included|Included|IPV4_DST_ADDR (12) IPV6_DST_ADDR (28)|destinationIPv4Address (12)destinationIPv6Address (28)| +|Etype|Ethernet type (0x86dd for 
IPv6...)|IPv4|Included|Included|Included| +|Proto|Protocol (UDP, TCP, ICMP...)|prot|Included|PROTOCOL (4)|protocolIdentifier (4)| +|SrcPort|Source port (when UDP/TCP/SCTP)|srcport|Included|L4_DST_PORT (11)|destinationTransportPort (11)| +|DstPort|Destination port (when UDP/TCP/SCTP)|dstport|Included|L4_SRC_PORT (7)|sourceTransportPort (7)| +|SrcIf|Source interface|input|Included|INPUT_SNMP (10)|ingressInterface (10)| +|DstIf|Destination interface|output|Included|OUTPUT_SNMP (14)|egressInterface (14)| +|SrcMac|Source mac address| |Included|IN_SRC_MAC (56)|sourceMacAddress (56)| +|DstMac|Destination mac address| |Included|OUT_DST_MAC (57)|postDestinationMacAddress (57)| +|SrcVlan|Source VLAN ID| |From ExtendedSwitch|SRC_VLAN (59)|vlanId (58)| +|DstVlan|Destination VLAN ID| |From ExtendedSwitch|DST_VLAN (59)|postVlanId (59)| +|VlanId|802.11q VLAN ID| |Included|SRC_VLAN (59)|postVlanId (59)| +|IngressVrfID|VRF ID| | | |ingressVRFID (234)| +|EgressVrfID|VRF ID| | | |egressVRFID (235)| +|IPTos|IP Type of Service|tos|Included|SRC_TOS (5)|ipClassOfService (5)| +|ForwardingStatus|Forwarding status| | |FORWARDING_STATUS (89)|forwardingStatus (89)| +|IPTTL|IP Time to Live| |Included|IPTTL (52)|minimumTTL (52| +|TCPFlags|TCP flags|tcp_flags|Included|TCP_FLAGS (6)|tcpControlBits (6)| +|IcmpType|ICMP Type| |Included|ICMP_TYPE (32)|icmpTypeXXX (176, 178) icmpTypeCodeXXX (32, 139)| +|IcmpCode|ICMP Code| |Included|ICMP_TYPE (32)|icmpCodeXXX (177, 179) icmpTypeCodeXXX (32, 139)| +|IPv6FlowLabel|IPv6 Flow Label| |Included|IPV6_FLOW_LABEL (31)|flowLabelIPv6 (31)| +|FragmentId|IP Fragment ID| |Included|IPV4_IDENT (54)|fragmentIdentification (54)| +|FragmentOffset|IP Fragment Offset| |Included|FRAGMENT_OFFSET (88)|fragmentOffset (88)| +|BiFlowDirection|BiFlow Identification| | | |biflowDirection (239)| +|SrcAS|Source AS number|src_as|From ExtendedGateway|SRC_AS (16)|bgpSourceAsNumber (16)| +|DstAS|Destination AS number|dst_as|From ExtendedGateway|DST_AS (17)|bgpDestinationAsNumber (17)| 
+|NextHop|Nexthop address|nexthop|From ExtendedGateway|IPV4_NEXT_HOP (15) BGP_IPV4_NEXT_HOP (18) IPV6_NEXT_HOP (62) BGP_IPV6_NEXT_HOP (63)|ipNextHopIPv4Address (15) bgpNextHopIPv4Address (18) ipNextHopIPv6Address (62) bgpNextHopIPv6Address (63)| +|NextHopAS|Nexthop AS number| |From ExtendedGateway| | | +|SrcNet|Source address mask|src_mask|From ExtendedRouter|SRC_MASK (9) IPV6_SRC_MASK (29)|sourceIPv4PrefixLength (9) sourceIPv6PrefixLength (29)| +|DstNet|Destination address mask|dst_mask|From ExtendedRouter|DST_MASK (13) IPV6_DST_MASK (30)|destinationIPv4PrefixLength (13) destinationIPv6PrefixLength (30)| + +If you are implementing flow processors to add more data to the protobuf, +we suggest you use field IDs ≥ 1000. ### Implementation notes -At Cloudflare, we aggregate the flows in Flink using a +The pipeline at Cloudflare is connecting collectors with flow processors +that will add more information: with IP address, add country, ASN, etc. + +For aggregation, we are using Materialized tables in Clickhouse. +Dictionaries help correlating flows with country and ASNs. +A few collectors can treat hundred of thousands of samples. + +We also experimented successfully flow aggregation with Flink using a [Keyed Session Window](https://ci.apache.org/projects/flink/flink-docs-release-1.4/dev/stream/operators/windows.html#session-windows): this sums the `Bytes x SamplingRate` and `Packets x SamplingRate` received during a 5 minutes **window** while allowing 2 more minutes in the case where some flows were delayed before closing the **session**. The BGP information provided by routers can be unreliable (if the router does not have a BGP full-table or it is a static route). You can use Maxmind [prefix to ASN](https://dev.maxmind.com/geoip/geoip2/geolite2/) in order to solve this issue. -We also gather the next-hops ASN using a custom BGP collector using [fgbgp library](https://github.com/cloudflare/fgbgp). 
## License diff --git a/cmd/cnetflow/cnetflow.go b/cmd/cnetflow/cnetflow.go new file mode 100644 index 0000000..c2ef8cb --- /dev/null +++ b/cmd/cnetflow/cnetflow.go @@ -0,0 +1,86 @@ +package main + +import ( + "flag" + "fmt" + "github.com/cloudflare/goflow/transport" + "github.com/cloudflare/goflow/utils" + log "github.com/sirupsen/logrus" + "os" + "runtime" + + "github.com/prometheus/client_golang/prometheus/promhttp" + "net/http" +) + +var ( + version = "" + AppVersion = "GoFlow NetFlow " + version + + Addr = flag.String("addr", "", "NetFlow/IPFIX listening address") + Port = flag.Int("port", 2055, "NetFlow/IPFIX listening port") + + Workers = flag.Int("workers", 1, "Number of NetFlow workers") + LogLevel = flag.String("loglevel", "info", "Log level") + LogFmt = flag.String("logfmt", "normal", "Log formatter") + + EnableKafka = flag.Bool("kafka", true, "Enable Kafka") + MetricsAddr = flag.String("metrics.addr", ":8080", "Metrics address") + MetricsPath = flag.String("metrics.path", "/metrics", "Metrics path") + TemplatePath = flag.String("templates.path", "/templates", "NetFlow/IPFIX templates list") + + Version = flag.Bool("v", false, "Print version") +) + +func init() { + transport.RegisterFlags() +} + +func httpServer(state *utils.StateNetFlow) { + http.Handle(*MetricsPath, promhttp.Handler()) + http.HandleFunc(*TemplatePath, state.ServeHTTPTemplates) + log.Fatal(http.ListenAndServe(*MetricsAddr, nil)) +} + +func main() { + flag.Parse() + + if *Version { + fmt.Println(AppVersion) + os.Exit(0) + } + + lvl, _ := log.ParseLevel(*LogLevel) + log.SetLevel(lvl) + switch *LogFmt { + case "json": + log.SetFormatter(&log.JSONFormatter{}) + } + + runtime.GOMAXPROCS(runtime.NumCPU()) + + log.Info("Starting GoFlow") + + s := &utils.StateNetFlow{ + Transport: &utils.DefaultLogTransport{}, + Logger: log.StandardLogger(), + } + + go httpServer(s) + + if *EnableKafka { + kafkaState, err := transport.StartKafkaProducerFromArgs(log.StandardLogger()) + if err != nil { + 
log.Fatal(err) + } + s.Transport = kafkaState + } + log.WithFields(log.Fields{ + "Type": "NetFlow"}). + Infof("Listening on UDP %v:%v", *Addr, *Port) + + err := s.FlowRoutine(*Workers, *Addr, *Port) + if err != nil { + log.Fatalf("Fatal error: could not listen to UDP (%v)", err) + } +} diff --git a/cmd/cnflegacy/cnflegacy.go b/cmd/cnflegacy/cnflegacy.go new file mode 100644 index 0000000..30f1d18 --- /dev/null +++ b/cmd/cnflegacy/cnflegacy.go @@ -0,0 +1,84 @@ +package main + +import ( + "flag" + "fmt" + "github.com/cloudflare/goflow/transport" + "github.com/cloudflare/goflow/utils" + log "github.com/sirupsen/logrus" + "os" + "runtime" + + "github.com/prometheus/client_golang/prometheus/promhttp" + "net/http" +) + +var ( + version = "" + AppVersion = "GoFlow NetFlowv5 " + version + + Addr = flag.String("addr", "", "NetFlow v5 listening address") + Port = flag.Int("port", 2055, "NetFlow v5 listening port") + + Workers = flag.Int("workers", 1, "Number of NetFlow v5 workers") + LogLevel = flag.String("loglevel", "info", "Log level") + LogFmt = flag.String("logfmt", "normal", "Log formatter") + + EnableKafka = flag.Bool("kafka", true, "Enable Kafka") + MetricsAddr = flag.String("metrics.addr", ":8080", "Metrics address") + MetricsPath = flag.String("metrics.path", "/metrics", "Metrics path") + + Version = flag.Bool("v", false, "Print version") +) + +func init() { + transport.RegisterFlags() +} + +func httpServer() { + http.Handle(*MetricsPath, promhttp.Handler()) + log.Fatal(http.ListenAndServe(*MetricsAddr, nil)) +} + +func main() { + flag.Parse() + + if *Version { + fmt.Println(AppVersion) + os.Exit(0) + } + + lvl, _ := log.ParseLevel(*LogLevel) + log.SetLevel(lvl) + switch *LogFmt { + case "json": + log.SetFormatter(&log.JSONFormatter{}) + } + + runtime.GOMAXPROCS(runtime.NumCPU()) + + log.Info("Starting GoFlow") + + s := &utils.StateNFLegacy{ + Transport: &utils.DefaultLogTransport{}, + Logger: log.StandardLogger(), + } + + go httpServer() + + if *EnableKafka { + 
kafkaState, err := transport.StartKafkaProducerFromArgs(log.StandardLogger()) + if err != nil { + log.Fatal(err) + } + s.Transport = kafkaState + } + log.WithFields(log.Fields{ + "Type": "NetFlowLegacy"}). + Infof("Listening on UDP %v:%v", *Addr, *Port) + + err := s.FlowRoutine(*Workers, *Addr, *Port) + if err != nil { + log.Fatalf("Fatal error: could not listen to UDP (%v)", err) + } +} diff --git a/cmd/csflow/csflow.go b/cmd/csflow/csflow.go new file mode 100644 index 0000000..bde0ad0 --- /dev/null +++ b/cmd/csflow/csflow.go @@ -0,0 +1,84 @@ +package main + +import ( + "flag" + "fmt" + "github.com/cloudflare/goflow/transport" + "github.com/cloudflare/goflow/utils" + log "github.com/sirupsen/logrus" + "os" + "runtime" + + "github.com/prometheus/client_golang/prometheus/promhttp" + "net/http" +) + +var ( + version = "" + AppVersion = "GoFlow sFlow " + version + + Addr = flag.String("addr", "", "sFlow listening address") + Port = flag.Int("port", 6343, "sFlow listening port") + + Workers = flag.Int("workers", 1, "Number of sFlow workers") + LogLevel = flag.String("loglevel", "info", "Log level") + LogFmt = flag.String("logfmt", "normal", "Log formatter") + + EnableKafka = flag.Bool("kafka", true, "Enable Kafka") + MetricsAddr = flag.String("metrics.addr", ":8080", "Metrics address") + MetricsPath = flag.String("metrics.path", "/metrics", "Metrics path") + + Version = flag.Bool("v", false, "Print version") +) + +func init() { + transport.RegisterFlags() +} + +func httpServer() { + http.Handle(*MetricsPath, promhttp.Handler()) + log.Fatal(http.ListenAndServe(*MetricsAddr, nil)) +} + +func main() { + flag.Parse() + + if *Version { + fmt.Println(AppVersion) + os.Exit(0) + } + + lvl, _ := log.ParseLevel(*LogLevel) + log.SetLevel(lvl) + switch *LogFmt { + case "json": + log.SetFormatter(&log.JSONFormatter{}) + } + + runtime.GOMAXPROCS(runtime.NumCPU()) + + log.Info("Starting GoFlow") + + s := &utils.StateSFlow{ + Transport: &utils.DefaultLogTransport{}, + Logger: 
log.StandardLogger(), + } + + go httpServer() + + if *EnableKafka { + kafkaState, err := transport.StartKafkaProducerFromArgs(log.StandardLogger()) + if err != nil { + log.Fatal(err) + } + s.Transport = kafkaState + } + log.WithFields(log.Fields{ + "Type": "sFlow"}). + Infof("Listening on UDP %v:%v", *Addr, *Port) + + err := s.FlowRoutine(*Workers, *Addr, *Port) + if err != nil { + log.Fatalf("Fatal error: could not listen to UDP (%v)", err) + } +} diff --git a/cmd/goflow/goflow.go b/cmd/goflow/goflow.go new file mode 100644 index 0000000..aba776c --- /dev/null +++ b/cmd/goflow/goflow.go @@ -0,0 +1,147 @@ +package main + +import ( + "flag" + "fmt" + "github.com/cloudflare/goflow/transport" + "github.com/cloudflare/goflow/utils" + log "github.com/sirupsen/logrus" + "os" + "runtime" + + "github.com/prometheus/client_golang/prometheus/promhttp" + "net/http" + + "sync" +) + +var ( + version = "" + AppVersion = "GoFlow " + version + + SFlowEnable = flag.Bool("sflow", true, "Enable sFlow") + SFlowAddr = flag.String("sflow.addr", "", "sFlow listening address") + SFlowPort = flag.Int("sflow.port", 6343, "sFlow listening port") + + NFLEnable = flag.Bool("nfl", true, "Enable NetFlow v5") + NFLAddr = flag.String("nfl.addr", "", "NetFlow v5 listening address") + NFLPort = flag.Int("nfl.port", 2056, "NetFlow v5 listening port") + + NFEnable = flag.Bool("nf", true, "Enable NetFlow/IPFIX") + NFAddr = flag.String("nf.addr", "", "NetFlow/IPFIX listening address") + NFPort = flag.Int("nf.port", 2055, "NetFlow/IPFIX listening port") + + Workers = flag.Int("workers", 1, "Number of workers per collector") + LogLevel = flag.String("loglevel", "info", "Log level") + LogFmt = flag.String("logfmt", "normal", "Log formatter") + + EnableKafka = flag.Bool("kafka", true, "Enable Kafka") + MetricsAddr = flag.String("metrics.addr", ":8080", "Metrics address") + MetricsPath = flag.String("metrics.path", "/metrics", "Metrics path") + + TemplatePath = flag.String("templates.path", "/templates", 
"NetFlow/IPFIX templates list") + + Version = flag.Bool("v", false, "Print version") +) + +func init() { + transport.RegisterFlags() +} + +func httpServer(state *utils.StateNetFlow) { + http.Handle(*MetricsPath, promhttp.Handler()) + http.HandleFunc(*TemplatePath, state.ServeHTTPTemplates) + log.Fatal(http.ListenAndServe(*MetricsAddr, nil)) +} + +func main() { + flag.Parse() + + if *Version { + fmt.Println(AppVersion) + os.Exit(0) + } + + lvl, _ := log.ParseLevel(*LogLevel) + log.SetLevel(lvl) + switch *LogFmt { + case "json": + log.SetFormatter(&log.JSONFormatter{}) + } + + runtime.GOMAXPROCS(runtime.NumCPU()) + + log.Info("Starting GoFlow") + + defaultTransport := &utils.DefaultLogTransport{} + + sSFlow := &utils.StateSFlow{ + Transport: defaultTransport, + Logger: log.StandardLogger(), + } + sNF := &utils.StateNetFlow{ + Transport: defaultTransport, + Logger: log.StandardLogger(), + } + sNFL := &utils.StateNFLegacy{ + Transport: defaultTransport, + Logger: log.StandardLogger(), + } + + go httpServer(sNF) + + if *EnableKafka { + kafkaState, err := transport.StartKafkaProducerFromArgs(log.StandardLogger()) + if err != nil { + log.Fatal(err) + } + sSFlow.Transport = kafkaState + sNFL.Transport = kafkaState + sNF.Transport = kafkaState + } + + wg := &sync.WaitGroup{} + if *SFlowEnable { + wg.Add(1) + go func() { + log.WithFields(log.Fields{ + "Type": "sFlow"}). + Infof("Listening on UDP %v:%v", *SFlowAddr, *SFlowPort) + + err := sSFlow.FlowRoutine(*Workers, *SFlowAddr, *SFlowPort) + if err != nil { + log.Fatalf("Fatal error: could not listen to UDP (%v)", err) + } + wg.Done() + }() + } + if *NFEnable { + wg.Add(1) + go func() { + log.WithFields(log.Fields{ + "Type": "NetFlow"}). 
+ Infof("Listening on UDP %v:%v", *NFAddr, *NFPort) + + err := sNF.FlowRoutine(*Workers, *NFAddr, *NFPort) + if err != nil { + log.Fatalf("Fatal error: could not listen to UDP (%v)", err) + } + wg.Done() + }() + } + if *NFLEnable { + wg.Add(1) + go func() { + log.WithFields(log.Fields{ + "Type": "NetFlowLegacy"}). + Infof("Listening on UDP %v:%v", *NFLAddr, *NFLPort) + + err := sNFL.FlowRoutine(*Workers, *NFLAddr, *NFLPort) + if err != nil { + log.Fatalf("Fatal error: could not listen to UDP (%v)", err) + } + wg.Done() + }() + } + wg.Wait() +} diff --git a/decoders/decoder.go b/decoders/decoder.go index 2c13f56..edfb1b8 100644 --- a/decoders/decoder.go +++ b/decoders/decoder.go @@ -1,7 +1,6 @@ package decoder import ( - //log "github.com/Sirupsen/logrus" "time" ) diff --git a/decoders/netflow/ipfix.go b/decoders/netflow/ipfix.go index 115932c..954b7d3 100644 --- a/decoders/netflow/ipfix.go +++ b/decoders/netflow/ipfix.go @@ -981,7 +981,7 @@ func (p IPFIXPacket) String() string { str += fmt.Sprintf(" - OptionsDataFlowSet %v:\n", i) str += flowSet.String(IPFIXTypeToString, IPFIXTypeToString) default: - str += fmt.Sprintf(" - (unknown type) %v:\n", i, flowSet) + str += fmt.Sprintf(" - (unknown type) %v: %v\n", i, flowSet) } } diff --git a/decoders/netflow/nfv9.go b/decoders/netflow/nfv9.go index a08d056..d46f106 100644 --- a/decoders/netflow/nfv9.go +++ b/decoders/netflow/nfv9.go @@ -310,7 +310,7 @@ func (p NFv9Packet) String() string { str += fmt.Sprintf(" - OptionsDataFlowSet %v:\n", i) str += flowSet.String(NFv9TypeToString, NFv9ScopeToString) default: - str += fmt.Sprintf(" - (unknown type) %v:\n", i, flowSet) + str += fmt.Sprintf(" - (unknown type) %v: %v\n", i, flowSet) } } return str diff --git a/decoders/netflowlegacy/netflow.go b/decoders/netflowlegacy/netflow.go new file mode 100644 index 0000000..93741fa --- /dev/null +++ b/decoders/netflowlegacy/netflow.go @@ -0,0 +1,52 @@ +package netflowlegacy + +import ( + "bytes" + "fmt" + 
"github.com/cloudflare/goflow/decoders/utils" +) + +type ErrorVersion struct { + version uint16 +} + +func NewErrorVersion(version uint16) *ErrorVersion { + return &ErrorVersion{ + version: version, + } +} + +func (e *ErrorVersion) Error() string { + return fmt.Sprintf("Unknown NetFlow version %v (only decodes v5)", e.version) +} + +func DecodeMessage(payload *bytes.Buffer) (interface{}, error) { + var version uint16 + utils.BinaryDecoder(payload, &version) + packet := PacketNetFlowV5{} + if version == 5 { + packet.Version = version + + utils.BinaryDecoder(payload, + &(packet.Count), + &(packet.SysUptime), + &(packet.UnixSecs), + &(packet.UnixNSecs), + &(packet.FlowSequence), + &(packet.EngineType), + &(packet.EngineId), + &(packet.SamplingInterval), + ) + + packet.Records = make([]RecordsNetFlowV5, int(packet.Count)) + for i := 0; i < int(packet.Count) && payload.Len() >= 48; i++ { + record := RecordsNetFlowV5{} + utils.BinaryDecoder(payload, &record) + packet.Records[i] = record + } + + return packet, nil + } else { + return nil, NewErrorVersion(version) + } +} diff --git a/decoders/netflowlegacy/netflow_test.go b/decoders/netflowlegacy/netflow_test.go new file mode 100644 index 0000000..a72f178 --- /dev/null +++ b/decoders/netflowlegacy/netflow_test.go @@ -0,0 +1,40 @@ +package netflowlegacy + +import ( + "bytes" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestDecodeNetFlowV5(t *testing.T) { + data := []byte{ + 0x00, 0x05, 0x00, 0x06, 0x00, 0x82, 0xc3, 0x48, 0x5b, 0xcd, 0xba, 0x1b, 0x05, 0x97, 0x6d, 0xc7, + 0x00, 0x00, 0x64, 0x3d, 0x08, 0x08, 0x00, 0x00, 0x0a, 0x80, 0x02, 0x79, 0x0a, 0x80, 0x02, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x02, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x02, 0x4e, + 0x00, 0x82, 0x9b, 0x8c, 0x00, 0x82, 0x9b, 0x90, 0x1f, 0x90, 0xb9, 0x18, 0x00, 0x1b, 0x06, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x80, 0x02, 0x77, 0x0a, 0x81, 0x02, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x01, 0x00, 
0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x94, + 0x00, 0x82, 0x95, 0xa9, 0x00, 0x82, 0x9a, 0xfb, 0x1f, 0x90, 0xc1, 0x2c, 0x00, 0x12, 0x06, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x81, 0x02, 0x01, 0x0a, 0x80, 0x02, 0x77, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xc2, + 0x00, 0x82, 0x95, 0xa9, 0x00, 0x82, 0x9a, 0xfc, 0xc1, 0x2c, 0x1f, 0x90, 0x00, 0x16, 0x06, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x80, 0x02, 0x01, 0x0a, 0x80, 0x02, 0x79, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x09, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x01, 0xf1, + 0x00, 0x82, 0x9b, 0x8c, 0x00, 0x82, 0x9b, 0x8f, 0xb9, 0x18, 0x1f, 0x90, 0x00, 0x1b, 0x06, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x80, 0x02, 0x01, 0x0a, 0x80, 0x02, 0x79, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x09, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x02, 0x2e, + 0x00, 0x82, 0x9b, 0x90, 0x00, 0x82, 0x9b, 0x9d, 0xb9, 0x1a, 0x1f, 0x90, 0x00, 0x1b, 0x06, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x80, 0x02, 0x79, 0x0a, 0x80, 0x02, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x02, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x0b, 0xac, + 0x00, 0x82, 0x9b, 0x90, 0x00, 0x82, 0x9b, 0x9d, 0x1f, 0x90, 0xb9, 0x1a, 0x00, 0x1b, 0x06, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + } + buf := bytes.NewBuffer(data) + + dec, err := DecodeMessage(buf) + assert.Nil(t, err) + assert.NotNil(t, dec) + decNfv5 := dec.(PacketNetFlowV5) + assert.Equal(t, uint16(5), decNfv5.Version) + assert.Equal(t, uint16(9), decNfv5.Records[0].Input) +} diff --git a/decoders/netflowlegacy/packet.go b/decoders/netflowlegacy/packet.go new file mode 100644 index 0000000..078bba4 --- /dev/null +++ b/decoders/netflowlegacy/packet.go @@ -0,0 +1,96 @@ +package netflowlegacy + +import ( + "encoding/binary" + "fmt" + "net" + "time" +) + +type PacketNetFlowV5 struct { + Version uint16 + Count uint16 + SysUptime uint32 + UnixSecs uint32 
+ UnixNSecs uint32 + FlowSequence uint32 + EngineType uint8 + EngineId uint8 + SamplingInterval uint16 + Records []RecordsNetFlowV5 +} + +type RecordsNetFlowV5 struct { + SrcAddr uint32 + DstAddr uint32 + NextHop uint32 + Input uint16 + Output uint16 + DPkts uint32 + DOctets uint32 + First uint32 + Last uint32 + SrcPort uint16 + DstPort uint16 + Pad1 byte + TCPFlags uint8 + Proto uint8 + Tos uint8 + SrcAS uint16 + DstAS uint16 + SrcMask uint8 + DstMask uint8 + Pad2 uint16 +} + +func (p PacketNetFlowV5) String() string { + str := "NetFlow v5 Packet\n" + str += "-----------------\n" + str += fmt.Sprintf(" Version: %v\n", p.Version) + str += fmt.Sprintf(" Count: %v\n", p.Count) + + unixSeconds := time.Unix(int64(p.UnixSecs), int64(p.UnixNSecs)) + str += fmt.Sprintf(" SystemUptime: %v\n", time.Duration(p.SysUptime)*time.Millisecond) + str += fmt.Sprintf(" UnixSeconds: %v\n", unixSeconds.String()) + str += fmt.Sprintf(" FlowSequence: %v\n", p.FlowSequence) + str += fmt.Sprintf(" EngineType: %v\n", p.EngineType) + str += fmt.Sprintf(" EngineId: %v\n", p.EngineId) + str += fmt.Sprintf(" SamplingInterval: %v\n", p.SamplingInterval) + str += fmt.Sprintf(" Records (%v):\n", len(p.Records)) + + for i, record := range p.Records { + str += fmt.Sprintf(" Record %v:\n", i) + str += record.String() + } + return str +} + +func (r RecordsNetFlowV5) String() string { + srcaddr := make(net.IP, 4) + binary.BigEndian.PutUint32(srcaddr, r.SrcAddr) + dstaddr := make(net.IP, 4) + binary.BigEndian.PutUint32(dstaddr, r.DstAddr) + nexthop := make(net.IP, 4) + binary.BigEndian.PutUint32(nexthop, r.NextHop) + + str := fmt.Sprintf(" SrcAddr: %v\n", srcaddr.String()) + str += fmt.Sprintf(" DstAddr: %v\n", dstaddr.String()) + str += fmt.Sprintf(" NextHop: %v\n", nexthop.String()) + str += fmt.Sprintf(" Input: %v\n", r.Input) + str += fmt.Sprintf(" Output: %v\n", r.Output) + str += fmt.Sprintf(" DPkts: %v\n", r.DPkts) + str += fmt.Sprintf(" DOctets: %v\n", r.DOctets) + str += fmt.Sprintf(" First: 
%v\n", time.Duration(r.First)*time.Millisecond) + str += fmt.Sprintf(" Last: %v\n", time.Duration(r.Last)*time.Millisecond) + str += fmt.Sprintf(" SrcPort: %v\n", r.SrcPort) + str += fmt.Sprintf(" DstPort: %v\n", r.DstPort) + str += fmt.Sprintf(" TCPFlags: %v\n", r.TCPFlags) + str += fmt.Sprintf(" Proto: %v\n", r.Proto) + str += fmt.Sprintf(" Tos: %v\n", r.Tos) + str += fmt.Sprintf(" SrcAS: %v\n", r.SrcAS) + str += fmt.Sprintf(" DstAS: %v\n", r.DstAS) + str += fmt.Sprintf(" SrcMask: %v\n", r.SrcMask) + str += fmt.Sprintf(" DstMask: %v\n", r.DstMask) + + return str +} diff --git a/decoders/sflow/sflow.go b/decoders/sflow/sflow.go index 4b77c3e..a8ef311 100644 --- a/decoders/sflow/sflow.go +++ b/decoders/sflow/sflow.go @@ -2,6 +2,7 @@ package sflow import ( "bytes" + "errors" "fmt" "github.com/cloudflare/goflow/decoders/utils" ) @@ -118,11 +119,17 @@ func DecodeFlowRecord(header *RecordHeader, payload *bytes.Buffer) (FlowRecord, switch (*header).DataFormat { case FORMAT_EXT_SWITCH: extendedSwitch := ExtendedSwitch{} - utils.BinaryDecoder(payload, &extendedSwitch) + err := utils.BinaryDecoder(payload, &extendedSwitch) + if err != nil { + return flowRecord, err + } flowRecord.Data = extendedSwitch case FORMAT_RAW_PKT: sampledHeader := SampledHeader{} - utils.BinaryDecoder(payload, &(sampledHeader.Protocol), &(sampledHeader.FrameLength), &(sampledHeader.Stripped), &(sampledHeader.OriginalLength)) + err := utils.BinaryDecoder(payload, &(sampledHeader.Protocol), &(sampledHeader.FrameLength), &(sampledHeader.Stripped), &(sampledHeader.OriginalLength)) + if err != nil { + return flowRecord, err + } sampledHeader.HeaderData = payload.Bytes() flowRecord.Data = sampledHeader case FORMAT_IPV4: @@ -130,22 +137,34 @@ func DecodeFlowRecord(header *RecordHeader, payload *bytes.Buffer) (FlowRecord, SrcIP: make([]byte, 4), DstIP: make([]byte, 4), } - utils.BinaryDecoder(payload, &sampledIPBase) + err := utils.BinaryDecoder(payload, &sampledIPBase) + if err != nil { + return 
flowRecord, err + } sampledIPv4 := SampledIPv4{ Base: sampledIPBase, } - utils.BinaryDecoder(payload, &(sampledIPv4.Tos)) + err = utils.BinaryDecoder(payload, &(sampledIPv4.Tos)) + if err != nil { + return flowRecord, err + } flowRecord.Data = sampledIPv4 case FORMAT_IPV6: sampledIPBase := SampledIP_Base{ SrcIP: make([]byte, 16), DstIP: make([]byte, 16), } - utils.BinaryDecoder(payload, &sampledIPBase) + err := utils.BinaryDecoder(payload, &sampledIPBase) + if err != nil { + return flowRecord, err + } sampledIPv6 := SampledIPv6{ Base: sampledIPBase, } - utils.BinaryDecoder(payload, &(sampledIPv6.Priority)) + err = utils.BinaryDecoder(payload, &(sampledIPv6.Priority)) + if err != nil { + return flowRecord, err + } flowRecord.Data = sampledIPv6 case FORMAT_EXT_ROUTER: extendedRouter := ExtendedRouter{} @@ -156,7 +175,10 @@ func DecodeFlowRecord(header *RecordHeader, payload *bytes.Buffer) (FlowRecord, } extendedRouter.NextHopIPVersion = ipVersion extendedRouter.NextHop = ip - utils.BinaryDecoder(payload, &(extendedRouter.SrcMaskLen), &(extendedRouter.DstMaskLen)) + err = utils.BinaryDecoder(payload, &(extendedRouter.SrcMaskLen), &(extendedRouter.DstMaskLen)) + if err != nil { + return flowRecord, err + } flowRecord.Data = extendedRouter case FORMAT_EXT_GATEWAY: extendedGateway := ExtendedGateway{} @@ -166,35 +188,53 @@ func DecodeFlowRecord(header *RecordHeader, payload *bytes.Buffer) (FlowRecord, } extendedGateway.NextHopIPVersion = ipVersion extendedGateway.NextHop = ip - utils.BinaryDecoder(payload, &(extendedGateway.AS), &(extendedGateway.SrcAS), &(extendedGateway.SrcPeerAS), + err = utils.BinaryDecoder(payload, &(extendedGateway.AS), &(extendedGateway.SrcAS), &(extendedGateway.SrcPeerAS), &(extendedGateway.ASDestinations)) + if err != nil { + return flowRecord, err + } asPath := make([]uint32, 0) if extendedGateway.ASDestinations != 0 { - utils.BinaryDecoder(payload, &(extendedGateway.ASPathType), &(extendedGateway.ASPathLength)) + err := 
utils.BinaryDecoder(payload, &(extendedGateway.ASPathType), &(extendedGateway.ASPathLength)) + if err != nil { + return flowRecord, err + } if int(extendedGateway.ASPathLength) > payload.Len()-4 { - return flowRecord, NewErrorDecodingSFlow(fmt.Sprintf("Invalid AS path length.", extendedGateway.ASPathLength)) + return flowRecord, errors.New(fmt.Sprintf("Invalid AS path length: %v.", extendedGateway.ASPathLength)) } asPath = make([]uint32, extendedGateway.ASPathLength) if len(asPath) > 0 { - utils.BinaryDecoder(payload, asPath) + err = utils.BinaryDecoder(payload, asPath) + if err != nil { + return flowRecord, err + } } } extendedGateway.ASPath = asPath - utils.BinaryDecoder(payload, &(extendedGateway.CommunitiesLength)) + err = utils.BinaryDecoder(payload, &(extendedGateway.CommunitiesLength)) + if err != nil { + return flowRecord, err + } if int(extendedGateway.CommunitiesLength) > payload.Len()-4 { - return flowRecord, NewErrorDecodingSFlow(fmt.Sprintf("Invalid Communities length.", extendedGateway.ASPathLength)) + return flowRecord, errors.New(fmt.Sprintf("Invalid Communities length: %v.", extendedGateway.ASPathLength)) } communities := make([]uint32, extendedGateway.CommunitiesLength) if len(communities) > 0 { - utils.BinaryDecoder(payload, communities) + err = utils.BinaryDecoder(payload, communities) + if err != nil { + return flowRecord, err + } + } + err = utils.BinaryDecoder(payload, &(extendedGateway.LocalPref)) + if err != nil { + return flowRecord, err } - utils.BinaryDecoder(payload, &(extendedGateway.LocalPref)) extendedGateway.Communities = communities flowRecord.Data = extendedGateway default: - return flowRecord, NewErrorDecodingSFlow(fmt.Sprintf("Unknown data format %v.", (*header).DataFormat)) + return flowRecord, errors.New(fmt.Sprintf("Unknown data format %v.", (*header).DataFormat)) } return flowRecord, nil } @@ -203,15 +243,24 @@ func DecodeSample(header *SampleHeader, payload *bytes.Buffer) (interface{}, err format := (*header).Format var 
sample interface{} - utils.BinaryDecoder(payload, &((*header).SampleSequenceNumber)) + err := utils.BinaryDecoder(payload, &((*header).SampleSequenceNumber)) + if err != nil { + return sample, err + } if format == FORMAT_RAW_PKT || format == FORMAT_ETH { var sourceId uint32 - utils.BinaryDecoder(payload, &sourceId) + err = utils.BinaryDecoder(payload, &sourceId) + if err != nil { + return sample, err + } (*header).SourceIdType = sourceId >> 24 (*header).SourceIdValue = sourceId & 0x00ffffff } else if format == FORMAT_IPV4 || format == FORMAT_IPV6 { - utils.BinaryDecoder(payload, &((*header).SourceIdType), &((*header).SourceIdValue)) + err = utils.BinaryDecoder(payload, &((*header).SourceIdType), &((*header).SourceIdValue)) + if err != nil { + return sample, err + } } else { return nil, NewErrorDataFormat(format) } @@ -224,13 +273,19 @@ func DecodeSample(header *SampleHeader, payload *bytes.Buffer) (interface{}, err flowSample = FlowSample{ Header: *header, } - utils.BinaryDecoder(payload, &(flowSample.SamplingRate), &(flowSample.SamplePool), + err = utils.BinaryDecoder(payload, &(flowSample.SamplingRate), &(flowSample.SamplePool), &(flowSample.Drops), &(flowSample.Input), &(flowSample.Output), &(flowSample.FlowRecordsCount)) + if err != nil { + return sample, err + } recordsCount = flowSample.FlowRecordsCount flowSample.Records = make([]FlowRecord, recordsCount) sample = flowSample } else if format == FORMAT_ETH || format == FORMAT_IPV6 { - utils.BinaryDecoder(payload, &recordsCount) + err = utils.BinaryDecoder(payload, &recordsCount) + if err != nil { + return sample, err + } counterSample = CounterSample{ Header: *header, CounterRecordsCount: recordsCount, @@ -241,16 +296,22 @@ func DecodeSample(header *SampleHeader, payload *bytes.Buffer) (interface{}, err expandedFlowSample = ExpandedFlowSample{ Header: *header, } - utils.BinaryDecoder(payload, &(expandedFlowSample.SamplingRate), &(expandedFlowSample.SamplePool), + err = utils.BinaryDecoder(payload, 
&(expandedFlowSample.SamplingRate), &(expandedFlowSample.SamplePool), &(expandedFlowSample.Drops), &(expandedFlowSample.InputIfFormat), &(expandedFlowSample.InputIfValue), &(expandedFlowSample.OutputIfFormat), &(expandedFlowSample.OutputIfValue), &(expandedFlowSample.FlowRecordsCount)) + if err != nil { + return sample, err + } recordsCount = expandedFlowSample.FlowRecordsCount expandedFlowSample.Records = make([]FlowRecord, recordsCount) sample = expandedFlowSample } for i := 0; i < int(recordsCount) && payload.Len() >= 8; i++ { recordHeader := RecordHeader{} - utils.BinaryDecoder(payload, &(recordHeader.DataFormat), &(recordHeader.Length)) + err = utils.BinaryDecoder(payload, &(recordHeader.DataFormat), &(recordHeader.Length)) + if err != nil { + return sample, err + } if int(recordHeader.Length) > payload.Len() { break } @@ -278,28 +339,43 @@ func DecodeSample(header *SampleHeader, payload *bytes.Buffer) (interface{}, err func DecodeMessage(payload *bytes.Buffer) (interface{}, error) { var version uint32 - utils.BinaryDecoder(payload, &version) + err := utils.BinaryDecoder(payload, &version) + if err != nil { + return nil, err + } packetV5 := Packet{} if version == 5 { packetV5.Version = version - utils.BinaryDecoder(payload, &(packetV5.IPVersion)) + err = utils.BinaryDecoder(payload, &(packetV5.IPVersion)) + if err != nil { + return packetV5, err + } var ip []byte if packetV5.IPVersion == 1 { ip = make([]byte, 4) utils.BinaryDecoder(payload, ip) } else if packetV5.IPVersion == 2 { ip = make([]byte, 16) - utils.BinaryDecoder(payload, ip) + err = utils.BinaryDecoder(payload, ip) + if err != nil { + return packetV5, err + } } else { return nil, NewErrorIPVersion(packetV5.IPVersion) } packetV5.AgentIP = ip - utils.BinaryDecoder(payload, &(packetV5.SubAgentId), &(packetV5.SequenceNumber), &(packetV5.Uptime), &(packetV5.SamplesCount)) + err = utils.BinaryDecoder(payload, &(packetV5.SubAgentId), &(packetV5.SequenceNumber), &(packetV5.Uptime), &(packetV5.SamplesCount)) 
+ if err != nil { + return packetV5, err + } packetV5.Samples = make([]interface{}, int(packetV5.SamplesCount)) for i := 0; i < int(packetV5.SamplesCount) && payload.Len() >= 8; i++ { header := SampleHeader{} - utils.BinaryDecoder(payload, &(header.Format), &(header.Length)) + err = utils.BinaryDecoder(payload, &(header.Format), &(header.Length)) + if err != nil { + return packetV5, err + } if int(header.Length) > payload.Len() { break } @@ -317,5 +393,4 @@ func DecodeMessage(payload *bytes.Buffer) (interface{}, error) { } else { return nil, NewErrorVersion(version) } - return nil, nil } diff --git a/decoders/sflow/sflow_test.go b/decoders/sflow/sflow_test.go new file mode 100644 index 0000000..68e0291 --- /dev/null +++ b/decoders/sflow/sflow_test.go @@ -0,0 +1,27 @@ +package sflow + +import ( + "bytes" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSFlowDecode(t *testing.T) { + data := []byte{ + 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x01, 0xac, 0x10, 0x00, 0x11, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x01, 0xaa, 0x67, 0xee, 0xaa, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x04, 0x13, 0x00, 0x00, 0x08, 0x00, + 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xaa, 0x00, 0x00, 0x04, 0x13, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x4e, 0x00, 0xff, 0x12, 0x34, + 0x35, 0x1b, 0xff, 0xab, 0xcd, 0xef, 0xab, 0x64, 0x81, 0x00, 0x00, 0x20, 0x08, 0x00, 0x45, 0x00, + 0x00, 0x3c, 0x5c, 0x07, 0x00, 0x00, 0x7c, 0x01, 0x48, 0xa0, 0xac, 0x10, 0x20, 0xfe, 0xac, 0x10, + 0x20, 0xf1, 0x08, 0x00, 0x97, 0x61, 0xa9, 0x48, 0x0c, 0xb2, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, + 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, + 0x77, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x00, 0x00, + } + buf := 
bytes.NewBuffer(data) + _, err := DecodeMessage(buf) + assert.Nil(t, err) + +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..39a30bc --- /dev/null +++ b/go.mod @@ -0,0 +1,12 @@ +module github.com/cloudflare/goflow + +go 1.12 + +require ( + github.com/Shopify/sarama v1.22.0 + github.com/golang/protobuf v1.3.1 + github.com/libp2p/go-reuseport v0.0.1 + github.com/prometheus/client_golang v0.9.2 + github.com/sirupsen/logrus v1.4.1 + github.com/stretchr/testify v1.3.0 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..c9f5615 --- /dev/null +++ b/go.sum @@ -0,0 +1,67 @@ +github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14= +github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/Shopify/sarama v1.22.0 h1:rtiODsvY4jW6nUV6n3K+0gx/8WlAwVt+Ixt6RIvpYyo= +github.com/Shopify/sarama v1.22.0/go.mod h1:lm3THZ8reqBDBQKQyb5HB3sY1lKp3grEbQ81aWSgPp4= +github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy 
v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/libp2p/go-reuseport v0.0.1 h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FWpFLw= +github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41 h1:GeinFsrjWz97fAxVUEd748aV0cYL+I6k44gFJTCVvpU= +github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang 
v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= 
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/goflow.go b/goflow.go deleted file mode 100644 index 0e6ef94..0000000 --- a/goflow.go +++ /dev/null @@ -1,840 +0,0 @@ -package main - -import ( - "errors" - "flag" - "fmt" - log "github.com/Sirupsen/logrus" - "github.com/cloudflare/goflow/decoders" - "github.com/cloudflare/goflow/decoders/netflow" - "github.com/cloudflare/goflow/decoders/sflow" - flowmessage "github.com/cloudflare/goflow/pb" - "github.com/cloudflare/goflow/producer" - "github.com/cloudflare/goflow/transport" - "net" - "os" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - "net/http" - - "encoding/json" - - "bytes" - reuseport 
"github.com/libp2p/go-reuseport" -) - -const AppVersion = "GoFlow v2.0.7" - -var ( - FEnable = flag.Bool("netflow", true, "Enable NetFlow") - SEnable = flag.Bool("sflow", true, "Enable sFlow") - - FAddr = flag.String("faddr", ":", "NetFlow/IPFIX listening address") - FPort = flag.Int("fport", 2055, "NetFlow/IPFIX listening port") - FReuse = flag.Bool("freuse", false, "Use so_reuseport for NetFlow/IPFIX listening") - - SAddr = flag.String("saddr", ":", "sFlow listening address") - SPort = flag.Int("sport", 6343, "sFlow listening port") - SReuse = flag.Bool("sreuse", false, "Use so_reuseport for sFlow listening") - - FWorkers = flag.Int("fworkers", 1, "Number of NetFlow workers") - SWorkers = flag.Int("sworkers", 1, "Number of sFlow workers") - LogLevel = flag.String("loglevel", "info", "Log level") - LogFmt = flag.String("logfmt", "normal", "Log formatter") - - EnableKafka = flag.Bool("kafka", true, "Enable Kafka") - MetricsAddr = flag.String("metrics.addr", ":8080", "Metrics address") - MetricsPath = flag.String("metrics.path", "/metrics", "Metrics path") - TemplatePath = flag.String("templates.path", "/templates", "NetFlow/IPFIX templates list") - - KafkaTLS = flag.Bool("kafka.tls", false, "Use TLS to connect to Kafka") - KafkaSASL = flag.Bool("kafka.sasl", false, "Use SASL/PLAIN data to connect to Kafka (TLS is recommended and the environment variables KAFKA_SASL_USER and KAFKA_SASL_PASS need to be set)") - KafkaTopic = flag.String("kafka.out.topic", "flow-messages", "Kafka topic to produce to") - KafkaSrv = flag.String("kafka.out.srv", "", "SRV record containing a list of Kafka brokers (or use kafka.out.brokers)") - KafkaBrk = flag.String("kafka.out.brokers", "127.0.0.1:9092,[::1]:9092", "Kafka brokers list separated by commas") - - Version = flag.Bool("v", false, "Print version") -) - -func init() { - initMetrics() -} - -func metricsHTTP() { - http.Handle(*MetricsPath, promhttp.Handler()) - log.Fatal(http.ListenAndServe(*MetricsAddr, nil)) -} - -func 
templatesHTTP(s *state) { - http.Handle(*TemplatePath, s) -} - -func (s *state) ServeHTTP(w http.ResponseWriter, r *http.Request) { - tmp := make(map[string]map[uint16]map[uint32]map[uint16]interface{}) - s.templateslock.RLock() - for key, templatesrouterstr := range s.templates { - templatesrouter := templatesrouterstr.templates.GetTemplates() - tmp[key] = templatesrouter - } - s.templateslock.RUnlock() - enc := json.NewEncoder(w) - enc.Encode(tmp) -} - -type TemplateSystem struct { - key string - templates *netflow.BasicTemplateSystem -} - -func (s *TemplateSystem) AddTemplate(version uint16, obsDomainId uint32, template interface{}) { - s.templates.AddTemplate(version, obsDomainId, template) - - typeStr := "options_template" - var templateId uint16 - switch templateIdConv := template.(type) { - case netflow.IPFIXOptionsTemplateRecord: - templateId = templateIdConv.TemplateId - case netflow.NFv9OptionsTemplateRecord: - templateId = templateIdConv.TemplateId - case netflow.TemplateRecord: - templateId = templateIdConv.TemplateId - typeStr = "template" - } - NetFlowTemplatesStats.With( - prometheus.Labels{ - "router": s.key, - "version": strconv.Itoa(int(version)), - "obs_domain_id": strconv.Itoa(int(obsDomainId)), - "template_id": strconv.Itoa(int(templateId)), - "type": typeStr, - }). 
- Inc() -} - -func (s *TemplateSystem) GetTemplate(version uint16, obsDomainId uint32, templateId uint16) (interface{}, error) { - return s.templates.GetTemplate(version, obsDomainId, templateId) -} - -func (s *state) decodeNetFlow(msg interface{}) error { - pkt := msg.(BaseMessage) - buf := bytes.NewBuffer(pkt.Payload) - - key := pkt.Src.String() - routerAddr := pkt.Src - if routerAddr.To4() != nil { - routerAddr = routerAddr.To4() - } - - s.templateslock.RLock() - templates, ok := s.templates[key] - if !ok { - templates = &TemplateSystem{ - templates: netflow.CreateTemplateSystem(), - key: key, - } - s.templates[key] = templates - } - s.templateslock.RUnlock() - s.samplinglock.RLock() - sampling, ok := s.sampling[key] - if !ok { - sampling = producer.CreateSamplingSystem() - s.sampling[key] = sampling - } - s.samplinglock.RUnlock() - - timeTrackStart := time.Now() - msgDec, err := netflow.DecodeMessage(buf, templates) - if err != nil { - switch err.(type) { - case *netflow.ErrorVersion: - NetFlowErrors.With( - prometheus.Labels{ - "router": key, - "error": "error_version", - }). - Inc() - case *netflow.ErrorFlowId: - NetFlowErrors.With( - prometheus.Labels{ - "router": key, - "error": "error_flow_id", - }). - Inc() - case *netflow.ErrorTemplateNotFound: - NetFlowErrors.With( - prometheus.Labels{ - "router": key, - "error": "template_not_found", - }). - Inc() - default: - NetFlowErrors.With( - prometheus.Labels{ - "router": key, - "error": "error_decoding", - }). - Inc() - } - return err - } - - flowMessageSet := make([]*flowmessage.FlowMessage, 0) - - switch msgDecConv := msgDec.(type) { - case netflow.NFv9Packet: - NetFlowStats.With( - prometheus.Labels{ - "router": key, - "version": "9", - }). - Inc() - - for _, fs := range msgDecConv.FlowSets { - switch fsConv := fs.(type) { - case netflow.TemplateFlowSet: - NetFlowSetStatsSum.With( - prometheus.Labels{ - "router": key, - "version": "9", - "type": "TemplateFlowSet", - }). 
- Inc() - - NetFlowSetRecordsStatsSum.With( - prometheus.Labels{ - "router": key, - "version": "9", - "type": "OptionsTemplateFlowSet", - }). - Add(float64(len(fsConv.Records))) - - case netflow.NFv9OptionsTemplateFlowSet: - NetFlowSetStatsSum.With( - prometheus.Labels{ - "router": key, - "version": "9", - "type": "OptionsTemplateFlowSet", - }). - Inc() - - NetFlowSetRecordsStatsSum.With( - prometheus.Labels{ - "router": key, - "version": "9", - "type": "OptionsTemplateFlowSet", - }). - Add(float64(len(fsConv.Records))) - - case netflow.OptionsDataFlowSet: - NetFlowSetStatsSum.With( - prometheus.Labels{ - "router": key, - "version": "9", - "type": "OptionsDataFlowSet", - }). - Inc() - - NetFlowSetRecordsStatsSum.With( - prometheus.Labels{ - "router": key, - "version": "9", - "type": "OptionsDataFlowSet", - }). - Add(float64(len(fsConv.Records))) - case netflow.DataFlowSet: - NetFlowSetStatsSum.With( - prometheus.Labels{ - "router": key, - "version": "9", - "type": "DataFlowSet", - }). - Inc() - - NetFlowSetRecordsStatsSum.With( - prometheus.Labels{ - "router": key, - "version": "9", - "type": "DataFlowSet", - }). - Add(float64(len(fsConv.Records))) - } - } - flowMessageSet, err = producer.ProcessMessageNetFlow(msgDecConv, sampling) - - for _, fmsg := range flowMessageSet { - fmsg.TimeRecvd = uint64(time.Now().UTC().Unix()) - fmsg.RouterAddr = routerAddr - timeDiff := fmsg.TimeRecvd - fmsg.TimeFlowEnd - NetFlowTimeStatsSum.With( - prometheus.Labels{ - "router": key, - "version": "9", - }). - Observe(float64(timeDiff)) - } - case netflow.IPFIXPacket: - NetFlowStats.With( - prometheus.Labels{ - "router": key, - "version": "10", - }). - Inc() - - for _, fs := range msgDecConv.FlowSets { - switch fsConv := fs.(type) { - case netflow.TemplateFlowSet: - NetFlowSetStatsSum.With( - prometheus.Labels{ - "router": key, - "version": "10", - "type": "TemplateFlowSet", - }). 
- Inc() - - NetFlowSetRecordsStatsSum.With( - prometheus.Labels{ - "router": key, - "version": "10", - "type": "TemplateFlowSet", - }). - Add(float64(len(fsConv.Records))) - - case netflow.IPFIXOptionsTemplateFlowSet: - NetFlowSetStatsSum.With( - prometheus.Labels{ - "router": key, - "version": "10", - "type": "OptionsTemplateFlowSet", - }). - Inc() - - NetFlowSetRecordsStatsSum.With( - prometheus.Labels{ - "router": key, - "version": "10", - "type": "OptionsTemplateFlowSet", - }). - Add(float64(len(fsConv.Records))) - - case netflow.OptionsDataFlowSet: - - NetFlowSetStatsSum.With( - prometheus.Labels{ - "router": key, - "version": "10", - "type": "OptionsDataFlowSet", - }). - Inc() - - NetFlowSetRecordsStatsSum.With( - prometheus.Labels{ - "router": key, - "version": "10", - "type": "OptionsDataFlowSet", - }). - Add(float64(len(fsConv.Records))) - - case netflow.DataFlowSet: - NetFlowSetStatsSum.With( - prometheus.Labels{ - "router": key, - "version": "10", - "type": "DataFlowSet", - }). - Inc() - - NetFlowSetRecordsStatsSum.With( - prometheus.Labels{ - "router": key, - "version": "10", - "type": "DataFlowSet", - }). - Add(float64(len(fsConv.Records))) - } - } - flowMessageSet, err = producer.ProcessMessageNetFlow(msgDecConv, sampling) - - for _, fmsg := range flowMessageSet { - fmsg.TimeRecvd = uint64(time.Now().UTC().Unix()) - fmsg.RouterAddr = routerAddr - timeDiff := fmsg.TimeRecvd - fmsg.TimeFlowEnd - NetFlowTimeStatsSum.With( - prometheus.Labels{ - "router": key, - "version": "10", - }). - Observe(float64(timeDiff)) - } - } - - timeTrackStop := time.Now() - DecoderTime.With( - prometheus.Labels{ - "name": "NetFlow", - }). 
- Observe(float64((timeTrackStop.Sub(timeTrackStart)).Nanoseconds()) / 1000) - - s.produceFlow(flowMessageSet) - - return nil -} - -func FlowMessageToString(fmsg *flowmessage.FlowMessage) string { - s := fmt.Sprintf("Type:%v TimeRecvd:%v SamplingRate:%v SequenceNum:%v TimeFlowStart:%v "+ - "TimeFlowEnd:%v SrcIP:%v DstIP:%v IPversion:%v Bytes:%v Packets:%v RouterAddr:%v "+ - "NextHop:%v NextHopAS:%v SrcAS:%v DstAS:%v SrcNet:%v DstNet:%v SrcIf:%v DstIf:%v "+ - "Proto:%v SrcPort:%v DstPort:%v IPTos:%v ForwardingStatus:%v IPTTL:%v TCPFlags:%v "+ - "SrcMac:%v DstMac:%v VlanId:%v Etype:%v IcmpType:%v IcmpCode:%v "+ - "SrcVlan:%v DstVlan:%v IPv6FlowLabel:%v", - fmsg.Type, fmsg.TimeRecvd, fmsg.SamplingRate, fmsg.SequenceNum, fmsg.TimeFlowStart, fmsg.TimeFlowEnd, - net.IP(fmsg.SrcIP), net.IP(fmsg.DstIP), fmsg.IPversion, fmsg.Bytes, fmsg.Packets, net.IP(fmsg.RouterAddr), net.IP(fmsg.NextHop), fmsg.NextHopAS, - fmsg.SrcAS, fmsg.DstAS, fmsg.SrcNet, fmsg.DstNet, fmsg.SrcIf, fmsg.DstIf, fmsg.Proto, - fmsg.SrcPort, fmsg.DstPort, fmsg.IPTos, fmsg.ForwardingStatus, fmsg.IPTTL, fmsg.TCPFlags, - fmsg.SrcMac, fmsg.DstMac, fmsg.VlanId, fmsg.Etype, fmsg.IcmpType, fmsg.IcmpCode, - fmsg.SrcVlan, fmsg.DstVlan, fmsg.IPv6FlowLabel) - return s -} - -func (s *state) produceFlow(fmsgset []*flowmessage.FlowMessage) { - for _, fmsg := range fmsgset { - if s.kafkaEn { - s.kafkaState.SendKafkaFlowMessage(fmsg) - } - if s.debug { - log.Debug(FlowMessageToString(fmsg)) - } - } - -} - -type BaseMessage struct { - Src net.IP - Port int - Payload []byte -} - -func (s *state) netflowRoutine() { - go templatesHTTP(s) - - s.templates = make(map[string]*TemplateSystem) - s.templateslock = &sync.RWMutex{} - s.sampling = make(map[string]producer.SamplingRateSystem) - s.samplinglock = &sync.RWMutex{} - - decoderParams := decoder.DecoderParams{ - DecoderFunc: s.decodeNetFlow, - DoneCallback: s.accountCallback, - ErrorCallback: nil, - } - log.Infof("Creating NetFlow message processor with %v workers", 
s.fworkers) - processor := decoder.CreateProcessor(s.fworkers, decoderParams, "NetFlow") - log.WithFields(log.Fields{ - "Name": "NetFlow"}).Debug("Starting workers") - processor.Start() - - addr := net.UDPAddr{ - IP: net.ParseIP(*FAddr), - Port: *FPort, - } - - var ( - udpconn *net.UDPConn - err error - ) - - if *FReuse { - pconn, err := reuseport.ListenPacket("udp", addr.String()) - if err != nil { - log.Fatalf("Fatal error: could not listen to UDP (%v)", err) - pconn.Close() - } - var ok bool - udpconn, ok = pconn.(*net.UDPConn) - if !ok { - log.Fatalf("Fatal error: could not listen to UDP with so_reuseport") - pconn.Close() - } - - } else { - - udpconn, err = net.ListenUDP("udp", &addr) - if err != nil { - log.Fatalf("Fatal error: could not listen to UDP (%v)", err) - udpconn.Close() - } - } - - payload := make([]byte, 9000) - - localIP := addr.IP.String() - if addr.IP == nil { - localIP = "" - } - log.WithFields(log.Fields{ - "Type": "NetFlow"}). - Infof("Listening on UDP %v:%v", localIP, strconv.Itoa(addr.Port)) - for { - size, pktAddr, _ := udpconn.ReadFromUDP(payload) - payloadCut := make([]byte, size) - copy(payloadCut, payload[0:size]) - - baseMessage := BaseMessage{ - Src: pktAddr.IP, - Port: pktAddr.Port, - Payload: payloadCut, - } - processor.ProcessMessage(baseMessage) - - MetricTrafficBytes.With( - prometheus.Labels{ - "remote_ip": pktAddr.IP.String(), - "remote_port": strconv.Itoa(pktAddr.Port), - "local_ip": localIP, - "local_port": strconv.Itoa(addr.Port), - "type": "NetFlow", - }). - Add(float64(size)) - MetricTrafficPackets.With( - prometheus.Labels{ - "remote_ip": pktAddr.IP.String(), - "remote_port": strconv.Itoa(pktAddr.Port), - "local_ip": localIP, - "local_port": strconv.Itoa(addr.Port), - "type": "NetFlow", - }). - Inc() - MetricPacketSizeSum.With( - prometheus.Labels{ - "remote_ip": pktAddr.IP.String(), - "remote_port": strconv.Itoa(pktAddr.Port), - "local_ip": localIP, - "local_port": strconv.Itoa(addr.Port), - "type": "NetFlow", - }). 
- Observe(float64(size)) - } - - udpconn.Close() -} - -func (s *state) decodeSflow(msg interface{}) error { - pkt := msg.(BaseMessage) - buf := bytes.NewBuffer(pkt.Payload) - key := pkt.Src.String() - - timeTrackStart := time.Now() - msgDec, err := sflow.DecodeMessage(buf) - - if err != nil { - switch err.(type) { - case *sflow.ErrorVersion: - SFlowErrors.With( - prometheus.Labels{ - "router": key, - "error": "error_version", - }). - Inc() - case *sflow.ErrorIPVersion: - SFlowErrors.With( - prometheus.Labels{ - "router": key, - "error": "error_ip_version", - }). - Inc() - case *sflow.ErrorDataFormat: - SFlowErrors.With( - prometheus.Labels{ - "router": key, - "error": "error_data_format", - }). - Inc() - default: - SFlowErrors.With( - prometheus.Labels{ - "router": key, - "error": "error_decoding", - }). - Inc() - } - return err - } - - switch msgDecConv := msgDec.(type) { - case sflow.Packet: - agentStr := net.IP(msgDecConv.AgentIP).String() - SFlowStats.With( - prometheus.Labels{ - "router": key, - "agent": agentStr, - "version": "5", - }). - Inc() - - for _, samples := range msgDecConv.Samples { - typeStr := "unknown" - countRec := 0 - switch samplesConv := samples.(type) { - case sflow.FlowSample: - typeStr = "FlowSample" - countRec = len(samplesConv.Records) - case sflow.CounterSample: - typeStr = "CounterSample" - if samplesConv.Header.Format == 4 { - typeStr = "Expanded" + typeStr - } - countRec = len(samplesConv.Records) - case sflow.ExpandedFlowSample: - typeStr = "ExpandedFlowSample" - countRec = len(samplesConv.Records) - } - SFlowSampleStatsSum.With( - prometheus.Labels{ - "router": key, - "agent": agentStr, - "version": "5", - "type": typeStr, - }). - Inc() - - SFlowSampleRecordsStatsSum.With( - prometheus.Labels{ - "router": key, - "agent": agentStr, - "version": "5", - "type": typeStr, - }). 
- Add(float64(countRec)) - } - - } - - var flowMessageSet []*flowmessage.FlowMessage - flowMessageSet, err = producer.ProcessMessageSFlow(msgDec) - - timeTrackStop := time.Now() - DecoderTime.With( - prometheus.Labels{ - "name": "sFlow", - }). - Observe(float64((timeTrackStop.Sub(timeTrackStart)).Nanoseconds()) / 1000) - - ts := uint64(time.Now().UTC().Unix()) - for _, fmsg := range flowMessageSet { - fmsg.TimeRecvd = ts - fmsg.TimeFlow = ts // deprecate this - fmsg.TimeFlowEnd = ts - } - - s.produceFlow(flowMessageSet) - - return nil -} - -func (s *state) accountCallback(name string, id int, start, end time.Time) { - DecoderProcessTime.With( - prometheus.Labels{ - "name": name, - }). - Observe(float64((end.Sub(start)).Nanoseconds()) / 1000) - DecoderStats.With( - prometheus.Labels{ - "worker": strconv.Itoa(id), - "name": name, - }). - Inc() -} - -type state struct { - kafkaState *transport.KafkaState - kafkaEn bool - - templateslock *sync.RWMutex - templates map[string]*TemplateSystem - - samplinglock *sync.RWMutex - sampling map[string]producer.SamplingRateSystem - - debug bool - - fworkers int - sworkers int -} - -func (s *state) sflowRoutine() { - decoderParams := decoder.DecoderParams{ - DecoderFunc: s.decodeSflow, - DoneCallback: s.accountCallback, - ErrorCallback: nil, - } - - processor := decoder.CreateProcessor(s.sworkers, decoderParams, "sFlow") - log.WithFields(log.Fields{ - "Name": "sFlow"}).Debug("Starting workers") - processor.Start() - - addr := net.UDPAddr{ - IP: net.ParseIP(*SAddr), - Port: *SPort, - } - - var ( - udpconn *net.UDPConn - err error - ) - - if *SReuse { - pconn, err := reuseport.ListenPacket("udp", addr.String()) - if err != nil { - log.Fatalf("Fatal error: could not listen to UDP (%v)", err) - pconn.Close() - } - var ok bool - udpconn, ok = pconn.(*net.UDPConn) - if !ok { - log.Fatalf("Fatal error: could not listen to UDP with so_reuseport") - pconn.Close() - } - - } else { - - udpconn, err = net.ListenUDP("udp", &addr) - if err != 
nil { - log.Fatalf("Fatal error: could not listen to UDP (%v)", err) - udpconn.Close() - } - } - - payload := make([]byte, 9000) - - localIP := addr.IP.String() - if addr.IP == nil { - localIP = "" - } - log.WithFields(log.Fields{ - "Type": "sFlow"}). - Infof("Listening on UDP %v:%v", localIP, strconv.Itoa(addr.Port)) - for { - size, pktAddr, _ := udpconn.ReadFromUDP(payload) - payloadCut := make([]byte, size) - copy(payloadCut, payload[0:size]) - - baseMessage := BaseMessage{ - Src: pktAddr.IP, - Port: pktAddr.Port, - Payload: payloadCut, - } - processor.ProcessMessage(baseMessage) - - MetricTrafficBytes.With( - prometheus.Labels{ - "remote_ip": pktAddr.IP.String(), - "remote_port": strconv.Itoa(pktAddr.Port), - "local_ip": localIP, - "local_port": strconv.Itoa(addr.Port), - "type": "sFlow", - }). - Add(float64(size)) - MetricTrafficPackets.With( - prometheus.Labels{ - "remote_ip": pktAddr.IP.String(), - "remote_port": strconv.Itoa(pktAddr.Port), - "local_ip": localIP, - "local_port": strconv.Itoa(addr.Port), - "type": "sFlow", - }). - Inc() - MetricPacketSizeSum.With( - prometheus.Labels{ - "remote_ip": pktAddr.IP.String(), - "remote_port": strconv.Itoa(pktAddr.Port), - "local_ip": localIP, - "local_port": strconv.Itoa(addr.Port), - "type": "sFlow", - }). 
- Observe(float64(size)) - } - - udpconn.Close() -} - -func GetServiceAddresses(srv string) (addrs []string, err error) { - _, srvs, err := net.LookupSRV("", "", srv) - if err != nil { - return nil, errors.New(fmt.Sprintf("Service discovery: %v\n", err)) - } - for _, srv := range srvs { - addrs = append(addrs, net.JoinHostPort(srv.Target, strconv.Itoa(int(srv.Port)))) - } - return addrs, nil -} - -func main() { - flag.Parse() - - if *Version { - fmt.Println(AppVersion) - os.Exit(0) - } - - go metricsHTTP() - - lvl, _ := log.ParseLevel(*LogLevel) - log.SetLevel(lvl) - switch *LogFmt { - case "json": - log.SetFormatter(&log.JSONFormatter{}) - } - - runtime.GOMAXPROCS(runtime.NumCPU()) - - wg := &sync.WaitGroup{} - log.WithFields(log.Fields{ - "NetFlow": *FEnable, - "sFlow": *SEnable}). - Info("Starting GoFlow") - - s := &state{ - fworkers: *FWorkers, - sworkers: *SWorkers, - } - - if *LogLevel == "debug" { - s.debug = true - } - - if *EnableKafka { - addrs := make([]string, 0) - if *KafkaSrv != "" { - addrs, _ = GetServiceAddresses(*KafkaSrv) - } else { - addrs = strings.Split(*KafkaBrk, ",") - } - kafkaState := transport.StartKafkaProducer(addrs, *KafkaTopic, *KafkaTLS, *KafkaSASL) - s.kafkaState = kafkaState - s.kafkaEn = true - } - - if *FEnable { - (*wg).Add(1) - go func() { - s.netflowRoutine() - (*wg).Done() - }() - } - if *SEnable { - (*wg).Add(1) - go func() { - s.sflowRoutine() - (*wg).Done() - }() - } - - (*wg).Wait() -} diff --git a/pb/flow.pb.go b/pb/flow.pb.go index 4e92d69..e26da4e 100644 --- a/pb/flow.pb.go +++ b/pb/flow.pb.go @@ -24,23 +24,26 @@ type FlowMessage_FlowType int32 const ( FlowMessage_FLOWUNKNOWN FlowMessage_FlowType = 0 - FlowMessage_NFV9 FlowMessage_FlowType = 9 - FlowMessage_IPFIX FlowMessage_FlowType = 10 - FlowMessage_SFLOW FlowMessage_FlowType = 5 + FlowMessage_SFLOW_5 FlowMessage_FlowType = 1 + FlowMessage_NETFLOW_V5 FlowMessage_FlowType = 2 + FlowMessage_NETFLOW_V9 FlowMessage_FlowType = 3 + FlowMessage_IPFIX FlowMessage_FlowType = 
4 ) var FlowMessage_FlowType_name = map[int32]string{ - 0: "FLOWUNKNOWN", - 9: "NFV9", - 10: "IPFIX", - 5: "SFLOW", + 0: "FLOWUNKNOWN", + 1: "SFLOW_5", + 2: "NETFLOW_V5", + 3: "NETFLOW_V9", + 4: "IPFIX", } var FlowMessage_FlowType_value = map[string]int32{ "FLOWUNKNOWN": 0, - "NFV9": 9, - "IPFIX": 10, - "SFLOW": 5, + "SFLOW_5": 1, + "NETFLOW_V5": 2, + "NETFLOW_V9": 3, + "IPFIX": 4, } func (x FlowMessage_FlowType) String() string { @@ -51,94 +54,64 @@ func (FlowMessage_FlowType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_0beab9b6746e934c, []int{0, 0} } -// To be deprecated -type FlowMessage_IPType int32 - -const ( - FlowMessage_IPUNKNOWN FlowMessage_IPType = 0 - FlowMessage_IPv4 FlowMessage_IPType = 4 - FlowMessage_IPv6 FlowMessage_IPType = 6 -) - -var FlowMessage_IPType_name = map[int32]string{ - 0: "IPUNKNOWN", - 4: "IPv4", - 6: "IPv6", -} - -var FlowMessage_IPType_value = map[string]int32{ - "IPUNKNOWN": 0, - "IPv4": 4, - "IPv6": 6, -} - -func (x FlowMessage_IPType) String() string { - return proto.EnumName(FlowMessage_IPType_name, int32(x)) -} - -func (FlowMessage_IPType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_0beab9b6746e934c, []int{0, 1} -} - type FlowMessage struct { - Type FlowMessage_FlowType `protobuf:"varint,1,opt,name=Type,proto3,enum=flowprotob.FlowMessage_FlowType" json:"Type,omitempty"` - TimeRecvd uint64 `protobuf:"varint,2,opt,name=TimeRecvd,proto3" json:"TimeRecvd,omitempty"` - SamplingRate uint64 `protobuf:"varint,3,opt,name=SamplingRate,proto3" json:"SamplingRate,omitempty"` - SequenceNum uint32 `protobuf:"varint,4,opt,name=SequenceNum,proto3" json:"SequenceNum,omitempty"` - // Found inside packet (To be deprecated in favor of TimeFlowEnd) - TimeFlow uint64 `protobuf:"varint,5,opt,name=TimeFlow,proto3" json:"TimeFlow,omitempty"` - // Source/destination addresses - SrcIP []byte `protobuf:"bytes,6,opt,name=SrcIP,proto3" json:"SrcIP,omitempty"` - DstIP []byte `protobuf:"bytes,7,opt,name=DstIP,proto3" 
json:"DstIP,omitempty"` - IPversion FlowMessage_IPType `protobuf:"varint,8,opt,name=IPversion,proto3,enum=flowprotob.FlowMessage_IPType" json:"IPversion,omitempty"` + Type FlowMessage_FlowType `protobuf:"varint,1,opt,name=Type,proto3,enum=flowprotob.FlowMessage_FlowType" json:"Type,omitempty"` + TimeReceived uint64 `protobuf:"varint,2,opt,name=TimeReceived,proto3" json:"TimeReceived,omitempty"` + SequenceNum uint32 `protobuf:"varint,4,opt,name=SequenceNum,proto3" json:"SequenceNum,omitempty"` + SamplingRate uint64 `protobuf:"varint,3,opt,name=SamplingRate,proto3" json:"SamplingRate,omitempty"` + FlowDirection uint32 `protobuf:"varint,42,opt,name=FlowDirection,proto3" json:"FlowDirection,omitempty"` + // Sampler information + SamplerAddress []byte `protobuf:"bytes,11,opt,name=SamplerAddress,proto3" json:"SamplerAddress,omitempty"` + // Found inside packet + TimeFlowStart uint64 `protobuf:"varint,38,opt,name=TimeFlowStart,proto3" json:"TimeFlowStart,omitempty"` + TimeFlowEnd uint64 `protobuf:"varint,5,opt,name=TimeFlowEnd,proto3" json:"TimeFlowEnd,omitempty"` // Size of the sampled packet Bytes uint64 `protobuf:"varint,9,opt,name=Bytes,proto3" json:"Bytes,omitempty"` Packets uint64 `protobuf:"varint,10,opt,name=Packets,proto3" json:"Packets,omitempty"` - // Routing information - RouterAddr []byte `protobuf:"bytes,11,opt,name=RouterAddr,proto3" json:"RouterAddr,omitempty"` - NextHop []byte `protobuf:"bytes,12,opt,name=NextHop,proto3" json:"NextHop,omitempty"` - NextHopAS uint32 `protobuf:"varint,13,opt,name=NextHopAS,proto3" json:"NextHopAS,omitempty"` - // Autonomous system information - SrcAS uint32 `protobuf:"varint,14,opt,name=SrcAS,proto3" json:"SrcAS,omitempty"` - DstAS uint32 `protobuf:"varint,15,opt,name=DstAS,proto3" json:"DstAS,omitempty"` - // Prefix size - SrcNet uint32 `protobuf:"varint,16,opt,name=SrcNet,proto3" json:"SrcNet,omitempty"` - DstNet uint32 `protobuf:"varint,17,opt,name=DstNet,proto3" json:"DstNet,omitempty"` - // Interfaces - SrcIf uint32 
`protobuf:"varint,18,opt,name=SrcIf,proto3" json:"SrcIf,omitempty"` - DstIf uint32 `protobuf:"varint,19,opt,name=DstIf,proto3" json:"DstIf,omitempty"` + // Source/destination addresses + SrcAddr []byte `protobuf:"bytes,6,opt,name=SrcAddr,proto3" json:"SrcAddr,omitempty"` + DstAddr []byte `protobuf:"bytes,7,opt,name=DstAddr,proto3" json:"DstAddr,omitempty"` + // Layer 3 protocol (IPv4/IPv6/ARP/...) + Etype uint32 `protobuf:"varint,30,opt,name=Etype,proto3" json:"Etype,omitempty"` // Layer 4 protocol Proto uint32 `protobuf:"varint,20,opt,name=Proto,proto3" json:"Proto,omitempty"` - // Port for UDP and TCP + // Ports for UDP and TCP SrcPort uint32 `protobuf:"varint,21,opt,name=SrcPort,proto3" json:"SrcPort,omitempty"` DstPort uint32 `protobuf:"varint,22,opt,name=DstPort,proto3" json:"DstPort,omitempty"` - // IP and TCP special flags - IPTos uint32 `protobuf:"varint,23,opt,name=IPTos,proto3" json:"IPTos,omitempty"` - ForwardingStatus uint32 `protobuf:"varint,24,opt,name=ForwardingStatus,proto3" json:"ForwardingStatus,omitempty"` - IPTTL uint32 `protobuf:"varint,25,opt,name=IPTTL,proto3" json:"IPTTL,omitempty"` - TCPFlags uint32 `protobuf:"varint,26,opt,name=TCPFlags,proto3" json:"TCPFlags,omitempty"` + // Interfaces + SrcIf uint32 `protobuf:"varint,18,opt,name=SrcIf,proto3" json:"SrcIf,omitempty"` + DstIf uint32 `protobuf:"varint,19,opt,name=DstIf,proto3" json:"DstIf,omitempty"` // Ethernet information SrcMac uint64 `protobuf:"varint,27,opt,name=SrcMac,proto3" json:"SrcMac,omitempty"` DstMac uint64 `protobuf:"varint,28,opt,name=DstMac,proto3" json:"DstMac,omitempty"` - // 802.1q VLAN in sampled packet - VlanId uint32 `protobuf:"varint,29,opt,name=VlanId,proto3" json:"VlanId,omitempty"` - // Layer 3 protocol (IPv4/IPv6/ARP/...) 
- Etype uint32 `protobuf:"varint,30,opt,name=Etype,proto3" json:"Etype,omitempty"` - IcmpType uint32 `protobuf:"varint,31,opt,name=IcmpType,proto3" json:"IcmpType,omitempty"` - IcmpCode uint32 `protobuf:"varint,32,opt,name=IcmpCode,proto3" json:"IcmpCode,omitempty"` // Vlan SrcVlan uint32 `protobuf:"varint,33,opt,name=SrcVlan,proto3" json:"SrcVlan,omitempty"` DstVlan uint32 `protobuf:"varint,34,opt,name=DstVlan,proto3" json:"DstVlan,omitempty"` - // Fragments (IPv4/IPv6) - FragmentId uint32 `protobuf:"varint,35,opt,name=FragmentId,proto3" json:"FragmentId,omitempty"` - FragmentOffset uint32 `protobuf:"varint,36,opt,name=FragmentOffset,proto3" json:"FragmentOffset,omitempty"` - IPv6FlowLabel uint32 `protobuf:"varint,37,opt,name=IPv6FlowLabel,proto3" json:"IPv6FlowLabel,omitempty"` + // 802.1q VLAN in sampled packet + VlanId uint32 `protobuf:"varint,29,opt,name=VlanId,proto3" json:"VlanId,omitempty"` // VRF - IngressVrfId uint32 `protobuf:"varint,38,opt,name=IngressVrfId,proto3" json:"IngressVrfId,omitempty"` - EgressVrfId uint32 `protobuf:"varint,39,opt,name=EgressVrfId,proto3" json:"EgressVrfId,omitempty"` - // time information, found inside packets - TimeFlowStart uint64 `protobuf:"varint,40,opt,name=TimeFlowStart,proto3" json:"TimeFlowStart,omitempty"` - TimeFlowEnd uint64 `protobuf:"varint,41,opt,name=TimeFlowEnd,proto3" json:"TimeFlowEnd,omitempty"` + IngressVrfID uint32 `protobuf:"varint,39,opt,name=IngressVrfID,proto3" json:"IngressVrfID,omitempty"` + EgressVrfID uint32 `protobuf:"varint,40,opt,name=EgressVrfID,proto3" json:"EgressVrfID,omitempty"` + // IP and TCP special flags + IPTos uint32 `protobuf:"varint,23,opt,name=IPTos,proto3" json:"IPTos,omitempty"` + ForwardingStatus uint32 `protobuf:"varint,24,opt,name=ForwardingStatus,proto3" json:"ForwardingStatus,omitempty"` + IPTTL uint32 `protobuf:"varint,25,opt,name=IPTTL,proto3" json:"IPTTL,omitempty"` + TCPFlags uint32 `protobuf:"varint,26,opt,name=TCPFlags,proto3" json:"TCPFlags,omitempty"` + IcmpType 
uint32 `protobuf:"varint,31,opt,name=IcmpType,proto3" json:"IcmpType,omitempty"` + IcmpCode uint32 `protobuf:"varint,32,opt,name=IcmpCode,proto3" json:"IcmpCode,omitempty"` + IPv6FlowLabel uint32 `protobuf:"varint,37,opt,name=IPv6FlowLabel,proto3" json:"IPv6FlowLabel,omitempty"` + // Fragments (IPv4/IPv6) + FragmentId uint32 `protobuf:"varint,35,opt,name=FragmentId,proto3" json:"FragmentId,omitempty"` + FragmentOffset uint32 `protobuf:"varint,36,opt,name=FragmentOffset,proto3" json:"FragmentOffset,omitempty"` + BiFlowDirection uint32 `protobuf:"varint,41,opt,name=BiFlowDirection,proto3" json:"BiFlowDirection,omitempty"` + // Autonomous system information + SrcAS uint32 `protobuf:"varint,14,opt,name=SrcAS,proto3" json:"SrcAS,omitempty"` + DstAS uint32 `protobuf:"varint,15,opt,name=DstAS,proto3" json:"DstAS,omitempty"` + NextHop []byte `protobuf:"bytes,12,opt,name=NextHop,proto3" json:"NextHop,omitempty"` + NextHopAS uint32 `protobuf:"varint,13,opt,name=NextHopAS,proto3" json:"NextHopAS,omitempty"` + // Prefix size + SrcNet uint32 `protobuf:"varint,16,opt,name=SrcNet,proto3" json:"SrcNet,omitempty"` + DstNet uint32 `protobuf:"varint,17,opt,name=DstNet,proto3" json:"DstNet,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -176,53 +149,53 @@ func (m *FlowMessage) GetType() FlowMessage_FlowType { return FlowMessage_FLOWUNKNOWN } -func (m *FlowMessage) GetTimeRecvd() uint64 { +func (m *FlowMessage) GetTimeReceived() uint64 { if m != nil { - return m.TimeRecvd + return m.TimeReceived } return 0 } -func (m *FlowMessage) GetSamplingRate() uint64 { +func (m *FlowMessage) GetSequenceNum() uint32 { if m != nil { - return m.SamplingRate + return m.SequenceNum } return 0 } -func (m *FlowMessage) GetSequenceNum() uint32 { +func (m *FlowMessage) GetSamplingRate() uint64 { if m != nil { - return m.SequenceNum + return m.SamplingRate } return 0 } -func (m *FlowMessage) GetTimeFlow() uint64 { +func (m 
*FlowMessage) GetFlowDirection() uint32 { if m != nil { - return m.TimeFlow + return m.FlowDirection } return 0 } -func (m *FlowMessage) GetSrcIP() []byte { +func (m *FlowMessage) GetSamplerAddress() []byte { if m != nil { - return m.SrcIP + return m.SamplerAddress } return nil } -func (m *FlowMessage) GetDstIP() []byte { +func (m *FlowMessage) GetTimeFlowStart() uint64 { if m != nil { - return m.DstIP + return m.TimeFlowStart } - return nil + return 0 } -func (m *FlowMessage) GetIPversion() FlowMessage_IPType { +func (m *FlowMessage) GetTimeFlowEnd() uint64 { if m != nil { - return m.IPversion + return m.TimeFlowEnd } - return FlowMessage_IPUNKNOWN + return 0 } func (m *FlowMessage) GetBytes() uint64 { @@ -239,51 +212,44 @@ func (m *FlowMessage) GetPackets() uint64 { return 0 } -func (m *FlowMessage) GetRouterAddr() []byte { +func (m *FlowMessage) GetSrcAddr() []byte { if m != nil { - return m.RouterAddr + return m.SrcAddr } return nil } -func (m *FlowMessage) GetNextHop() []byte { +func (m *FlowMessage) GetDstAddr() []byte { if m != nil { - return m.NextHop + return m.DstAddr } return nil } -func (m *FlowMessage) GetNextHopAS() uint32 { - if m != nil { - return m.NextHopAS - } - return 0 -} - -func (m *FlowMessage) GetSrcAS() uint32 { +func (m *FlowMessage) GetEtype() uint32 { if m != nil { - return m.SrcAS + return m.Etype } return 0 } -func (m *FlowMessage) GetDstAS() uint32 { +func (m *FlowMessage) GetProto() uint32 { if m != nil { - return m.DstAS + return m.Proto } return 0 } -func (m *FlowMessage) GetSrcNet() uint32 { +func (m *FlowMessage) GetSrcPort() uint32 { if m != nil { - return m.SrcNet + return m.SrcPort } return 0 } -func (m *FlowMessage) GetDstNet() uint32 { +func (m *FlowMessage) GetDstPort() uint32 { if m != nil { - return m.DstNet + return m.DstPort } return 0 } @@ -302,79 +268,79 @@ func (m *FlowMessage) GetDstIf() uint32 { return 0 } -func (m *FlowMessage) GetProto() uint32 { +func (m *FlowMessage) GetSrcMac() uint64 { if m != nil { - return 
m.Proto + return m.SrcMac } return 0 } -func (m *FlowMessage) GetSrcPort() uint32 { +func (m *FlowMessage) GetDstMac() uint64 { if m != nil { - return m.SrcPort + return m.DstMac } return 0 } -func (m *FlowMessage) GetDstPort() uint32 { +func (m *FlowMessage) GetSrcVlan() uint32 { if m != nil { - return m.DstPort + return m.SrcVlan } return 0 } -func (m *FlowMessage) GetIPTos() uint32 { +func (m *FlowMessage) GetDstVlan() uint32 { if m != nil { - return m.IPTos + return m.DstVlan } return 0 } -func (m *FlowMessage) GetForwardingStatus() uint32 { +func (m *FlowMessage) GetVlanId() uint32 { if m != nil { - return m.ForwardingStatus + return m.VlanId } return 0 } -func (m *FlowMessage) GetIPTTL() uint32 { +func (m *FlowMessage) GetIngressVrfID() uint32 { if m != nil { - return m.IPTTL + return m.IngressVrfID } return 0 } -func (m *FlowMessage) GetTCPFlags() uint32 { +func (m *FlowMessage) GetEgressVrfID() uint32 { if m != nil { - return m.TCPFlags + return m.EgressVrfID } return 0 } -func (m *FlowMessage) GetSrcMac() uint64 { +func (m *FlowMessage) GetIPTos() uint32 { if m != nil { - return m.SrcMac + return m.IPTos } return 0 } -func (m *FlowMessage) GetDstMac() uint64 { +func (m *FlowMessage) GetForwardingStatus() uint32 { if m != nil { - return m.DstMac + return m.ForwardingStatus } return 0 } -func (m *FlowMessage) GetVlanId() uint32 { +func (m *FlowMessage) GetIPTTL() uint32 { if m != nil { - return m.VlanId + return m.IPTTL } return 0 } -func (m *FlowMessage) GetEtype() uint32 { +func (m *FlowMessage) GetTCPFlags() uint32 { if m != nil { - return m.Etype + return m.TCPFlags } return 0 } @@ -393,121 +359,127 @@ func (m *FlowMessage) GetIcmpCode() uint32 { return 0 } -func (m *FlowMessage) GetSrcVlan() uint32 { +func (m *FlowMessage) GetIPv6FlowLabel() uint32 { if m != nil { - return m.SrcVlan + return m.IPv6FlowLabel } return 0 } -func (m *FlowMessage) GetDstVlan() uint32 { +func (m *FlowMessage) GetFragmentId() uint32 { if m != nil { - return m.DstVlan + return 
m.FragmentId } return 0 } -func (m *FlowMessage) GetFragmentId() uint32 { +func (m *FlowMessage) GetFragmentOffset() uint32 { if m != nil { - return m.FragmentId + return m.FragmentOffset } return 0 } -func (m *FlowMessage) GetFragmentOffset() uint32 { +func (m *FlowMessage) GetBiFlowDirection() uint32 { if m != nil { - return m.FragmentOffset + return m.BiFlowDirection } return 0 } -func (m *FlowMessage) GetIPv6FlowLabel() uint32 { +func (m *FlowMessage) GetSrcAS() uint32 { if m != nil { - return m.IPv6FlowLabel + return m.SrcAS } return 0 } -func (m *FlowMessage) GetIngressVrfId() uint32 { +func (m *FlowMessage) GetDstAS() uint32 { if m != nil { - return m.IngressVrfId + return m.DstAS } return 0 } -func (m *FlowMessage) GetEgressVrfId() uint32 { +func (m *FlowMessage) GetNextHop() []byte { if m != nil { - return m.EgressVrfId + return m.NextHop + } + return nil +} + +func (m *FlowMessage) GetNextHopAS() uint32 { + if m != nil { + return m.NextHopAS } return 0 } -func (m *FlowMessage) GetTimeFlowStart() uint64 { +func (m *FlowMessage) GetSrcNet() uint32 { if m != nil { - return m.TimeFlowStart + return m.SrcNet } return 0 } -func (m *FlowMessage) GetTimeFlowEnd() uint64 { +func (m *FlowMessage) GetDstNet() uint32 { if m != nil { - return m.TimeFlowEnd + return m.DstNet } return 0 } func init() { proto.RegisterEnum("flowprotob.FlowMessage_FlowType", FlowMessage_FlowType_name, FlowMessage_FlowType_value) - proto.RegisterEnum("flowprotob.FlowMessage_IPType", FlowMessage_IPType_name, FlowMessage_IPType_value) proto.RegisterType((*FlowMessage)(nil), "flowprotob.FlowMessage") } func init() { proto.RegisterFile("pb/flow.proto", fileDescriptor_0beab9b6746e934c) } var fileDescriptor_0beab9b6746e934c = []byte{ - // 703 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x94, 0x6f, 0x73, 0xd2, 0x4a, - 0x14, 0xc6, 0x2f, 0xf7, 0x52, 0x0a, 0x4b, 0x69, 0x73, 0xf7, 0xf6, 0xd6, 0x63, 0xad, 0x88, 0x58, - 0x2b, 0xda, 0x19, 
0x9c, 0xd1, 0x4e, 0x67, 0x1c, 0x7d, 0x43, 0xff, 0x30, 0x66, 0xa4, 0x34, 0x43, - 0xb0, 0xf5, 0x6d, 0x48, 0x36, 0x19, 0xc6, 0x90, 0xe0, 0x66, 0xa1, 0xf6, 0x33, 0xf9, 0x25, 0x9d, - 0x73, 0x36, 0x09, 0x41, 0xc7, 0x77, 0xfb, 0xfc, 0xce, 0xb3, 0x67, 0xd9, 0x87, 0x9c, 0x65, 0x8d, - 0xf9, 0xe4, 0xb5, 0x1f, 0xc6, 0x77, 0xdd, 0xb9, 0x8c, 0x55, 0xcc, 0x19, 0xae, 0x69, 0x39, 0x69, - 0xff, 0x60, 0xac, 0xde, 0x0f, 0xe3, 0xbb, 0x2b, 0x91, 0x24, 0x4e, 0x20, 0xf8, 0x09, 0x2b, 0x8f, - 0xef, 0xe7, 0x02, 0x4a, 0xad, 0x52, 0x67, 0xfb, 0x4d, 0xab, 0xbb, 0xb2, 0x76, 0x0b, 0x36, 0x5a, - 0xa3, 0x6f, 0x44, 0x6e, 0x7e, 0xc0, 0x6a, 0xe3, 0xe9, 0x4c, 0x8c, 0x84, 0xbb, 0xf4, 0xe0, 0xef, - 0x56, 0xa9, 0x53, 0x1e, 0xad, 0x00, 0x6f, 0xb3, 0x2d, 0xdb, 0x99, 0xcd, 0xc3, 0x69, 0x14, 0x8c, - 0x1c, 0x25, 0xe0, 0x1f, 0x32, 0xac, 0x31, 0xde, 0x62, 0x75, 0x5b, 0x7c, 0x5b, 0x88, 0xc8, 0x15, - 0xc3, 0xc5, 0x0c, 0xca, 0xad, 0x52, 0xa7, 0x31, 0x2a, 0x22, 0xbe, 0xcf, 0xaa, 0xd8, 0x12, 0x4f, - 0x86, 0x0d, 0xea, 0x90, 0x6b, 0xbe, 0xcb, 0x36, 0x6c, 0xe9, 0x9a, 0x16, 0x54, 0x5a, 0xa5, 0xce, - 0xd6, 0x48, 0x0b, 0xa4, 0x17, 0x89, 0x32, 0x2d, 0xd8, 0xd4, 0x94, 0x04, 0xff, 0xc0, 0x6a, 0xa6, - 0xb5, 0x14, 0x32, 0x99, 0xc6, 0x11, 0x54, 0xe9, 0x9a, 0xcd, 0x3f, 0x5d, 0xd3, 0xb4, 0xe8, 0x92, - 0xab, 0x0d, 0xd8, 0xf3, 0xec, 0x5e, 0x89, 0x04, 0x6a, 0xf4, 0x13, 0xb4, 0xe0, 0xc0, 0x36, 0x2d, - 0xc7, 0xfd, 0x2a, 0x54, 0x02, 0x8c, 0x78, 0x26, 0x79, 0x93, 0xb1, 0x51, 0xbc, 0x50, 0x42, 0xf6, - 0x3c, 0x4f, 0x42, 0x9d, 0x7e, 0x48, 0x81, 0xe0, 0xce, 0xa1, 0xf8, 0xae, 0x3e, 0xc6, 0x73, 0xd8, - 0xa2, 0x62, 0x26, 0x31, 0xd3, 0x74, 0xd9, 0xb3, 0xa1, 0x41, 0x79, 0xac, 0x40, 0x7a, 0xe3, 0x9e, - 0x0d, 0xdb, 0x54, 0xd1, 0x22, 0xbd, 0x71, 0xcf, 0x86, 0x1d, 0x4d, 0x49, 0xf0, 0x3d, 0x56, 0xb1, - 0xa5, 0x3b, 0x14, 0x0a, 0x0c, 0xc2, 0xa9, 0x42, 0x7e, 0x91, 0x28, 0xe4, 0xff, 0x6a, 0xae, 0x55, - 0x96, 0xa6, 0x0f, 0x3c, 0xef, 0x6d, 0xfa, 0x59, 0x9a, 0x3e, 0xfc, 0x97, 0xf7, 0xd6, 0xd4, 0xc2, - 0xdc, 0x60, 0x57, 0x53, 0x12, 0x78, 0x2b, 0x5b, 0xba, 
0x56, 0x2c, 0x15, 0xfc, 0x4f, 0x3c, 0x93, - 0x58, 0xb9, 0x48, 0x14, 0x55, 0xf6, 0x74, 0x25, 0x95, 0xd8, 0xc9, 0xb4, 0xc6, 0x71, 0x02, 0x0f, - 0x74, 0x27, 0x12, 0xfc, 0x15, 0x33, 0xfa, 0xb1, 0xbc, 0x73, 0xa4, 0x37, 0x8d, 0x02, 0x5b, 0x39, - 0x6a, 0x91, 0x00, 0x90, 0xe1, 0x37, 0x9e, 0x76, 0x18, 0x0f, 0xe0, 0x61, 0xde, 0x61, 0x3c, 0xa0, - 0xef, 0xe6, 0xdc, 0xea, 0x87, 0x4e, 0x90, 0xc0, 0x3e, 0x15, 0x72, 0x9d, 0x26, 0x73, 0xe5, 0xb8, - 0xf0, 0x88, 0xfe, 0xb6, 0x54, 0xa5, 0xc9, 0x20, 0x3f, 0xd0, 0x5c, 0x2b, 0xe4, 0x37, 0xa1, 0x13, - 0x99, 0x1e, 0x3c, 0xd6, 0x89, 0x69, 0x85, 0x27, 0x5f, 0x2a, 0x1c, 0x9b, 0xa6, 0x3e, 0x99, 0x04, - 0x9e, 0x6c, 0xba, 0xb3, 0x39, 0xcd, 0xd3, 0x13, 0x7d, 0x72, 0xa6, 0xb3, 0xda, 0x79, 0xec, 0x09, - 0x68, 0xad, 0x6a, 0xa8, 0xd3, 0xf4, 0xb0, 0x35, 0x3c, 0xcd, 0xd3, 0x43, 0x99, 0xa6, 0x47, 0x95, - 0x76, 0x9e, 0x1e, 0x55, 0x9a, 0x8c, 0xf5, 0xa5, 0x13, 0xcc, 0x44, 0xa4, 0x4c, 0x0f, 0x9e, 0x51, - 0xb1, 0x40, 0xf8, 0x11, 0xdb, 0xce, 0xd4, 0xb5, 0xef, 0x27, 0x42, 0xc1, 0x21, 0x79, 0x7e, 0xa1, - 0xfc, 0x90, 0x35, 0x4c, 0x6b, 0x79, 0x8a, 0x43, 0x30, 0x70, 0x26, 0x22, 0x84, 0xe7, 0x64, 0x5b, - 0x87, 0x38, 0xd1, 0x66, 0x14, 0x48, 0x91, 0x24, 0x37, 0xd2, 0x37, 0x3d, 0x38, 0x22, 0xd3, 0x1a, - 0xc3, 0x89, 0xbe, 0x2c, 0x58, 0x5e, 0xe8, 0x89, 0x2e, 0x20, 0x3c, 0x2b, 0x9b, 0x60, 0x5b, 0x39, - 0x52, 0x41, 0x87, 0xc2, 0x5e, 0x87, 0xd8, 0x27, 0x03, 0x97, 0x91, 0x07, 0x2f, 0xc9, 0x53, 0x44, - 0xed, 0xf7, 0xac, 0x9a, 0xbd, 0x47, 0x7c, 0x87, 0xd5, 0xfb, 0x83, 0xeb, 0xdb, 0xcf, 0xc3, 0x4f, - 0xc3, 0xeb, 0xdb, 0xa1, 0xf1, 0x17, 0xaf, 0xb2, 0xf2, 0xb0, 0x7f, 0xf3, 0xce, 0xa8, 0xf1, 0x1a, - 0x7e, 0x1e, 0x7d, 0xf3, 0x8b, 0xc1, 0x70, 0x69, 0xa3, 0xcd, 0xd8, 0x68, 0x1f, 0xb3, 0x8a, 0x9e, - 0x72, 0xde, 0xc0, 0x87, 0x61, 0x6d, 0xa3, 0x69, 0x2d, 0x4f, 0x8c, 0x72, 0xba, 0x3a, 0x35, 0x2a, - 0x67, 0xc7, 0x6c, 0xdf, 0x8d, 0x67, 0x5d, 0x37, 0x8c, 0x17, 0x9e, 0x1f, 0x3a, 0x52, 0x74, 0x23, - 0xa1, 0xe8, 0xf1, 0x70, 0x82, 0xe0, 0xac, 0x51, 0x78, 0x3a, 0xac, 0xc9, 0xa4, 0x42, 0x0f, 
0xca, - 0xdb, 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xd5, 0xca, 0x94, 0x7e, 0x05, 0x00, 0x00, + // 694 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x94, 0xd1, 0x73, 0xd2, 0x4a, + 0x14, 0xc6, 0x2f, 0x2d, 0xa5, 0xed, 0x52, 0x28, 0x77, 0x6f, 0x6f, 0x3d, 0xd6, 0x5a, 0x11, 0x6b, + 0xc5, 0x3a, 0x83, 0x33, 0x6a, 0x9d, 0xf1, 0x11, 0x0a, 0x8c, 0x19, 0x29, 0x65, 0x08, 0xb6, 0xbe, + 0x39, 0x4b, 0xb2, 0x64, 0x18, 0x43, 0x82, 0x9b, 0xa5, 0xb5, 0xff, 0x8e, 0x7f, 0xa9, 0x73, 0xce, + 0x6e, 0x68, 0xa8, 0x4f, 0xd9, 0xef, 0xf7, 0x9d, 0x9c, 0x9c, 0x9d, 0xfd, 0xb2, 0xac, 0x34, 0x1f, + 0xbf, 0x9d, 0x84, 0xf1, 0x6d, 0x63, 0xae, 0x62, 0x1d, 0x73, 0x86, 0x6b, 0x5a, 0x8e, 0x6b, 0xbf, + 0x19, 0x2b, 0x76, 0xc3, 0xf8, 0xf6, 0x42, 0x26, 0x89, 0x08, 0x24, 0xff, 0xc0, 0xf2, 0xa3, 0xbb, + 0xb9, 0x84, 0x5c, 0x35, 0x57, 0x2f, 0xbf, 0xab, 0x36, 0xee, 0x4b, 0x1b, 0x99, 0x32, 0x5a, 0x63, + 0xdd, 0x90, 0xaa, 0x79, 0x8d, 0xed, 0x8c, 0xa6, 0x33, 0x39, 0x94, 0x9e, 0x9c, 0xde, 0x48, 0x1f, + 0xd6, 0xaa, 0xb9, 0x7a, 0x7e, 0xb8, 0xc2, 0x78, 0x95, 0x15, 0x5d, 0xf9, 0x73, 0x21, 0x23, 0x4f, + 0xf6, 0x17, 0x33, 0xc8, 0x57, 0x73, 0xf5, 0xd2, 0x30, 0x8b, 0xb0, 0x8b, 0x2b, 0x66, 0xf3, 0x70, + 0x1a, 0x05, 0x43, 0xa1, 0x25, 0xac, 0x9b, 0x2e, 0x59, 0xc6, 0x8f, 0x59, 0x09, 0xbf, 0xdd, 0x9e, + 0x2a, 0xe9, 0xe9, 0x69, 0x1c, 0xc1, 0x29, 0xf5, 0x59, 0x85, 0xfc, 0x84, 0x95, 0xe9, 0x2d, 0xa9, + 0x9a, 0xbe, 0xaf, 0x64, 0x92, 0x40, 0xb1, 0x9a, 0xab, 0xef, 0x0c, 0x1f, 0x50, 0xec, 0x86, 0x33, + 0xe2, 0xcb, 0xae, 0x16, 0x4a, 0xc3, 0x09, 0x7d, 0x72, 0x15, 0xe2, 0xe4, 0x29, 0xe8, 0x44, 0x3e, + 0x6c, 0x50, 0x4d, 0x16, 0xf1, 0x3d, 0xb6, 0xd1, 0xba, 0xd3, 0x32, 0x81, 0x6d, 0xf2, 0x8c, 0xe0, + 0xc0, 0x36, 0x07, 0xc2, 0xfb, 0x21, 0x75, 0x02, 0x8c, 0x78, 0x2a, 0xd1, 0x71, 0x95, 0x87, 0x53, + 0x40, 0x81, 0x06, 0x4b, 0x25, 0x3a, 0xed, 0x44, 0x93, 0xb3, 0x69, 0x1c, 0x2b, 0xf1, 0x1b, 0x1d, + 0x8d, 0x47, 0x73, 0x44, 0x3b, 0x36, 0x02, 0xe9, 0x00, 0x8f, 0x07, 0xf6, 0x0c, 0x25, 
0x61, 0xfb, + 0x0f, 0x62, 0xa5, 0xe1, 0x7f, 0xe2, 0xa9, 0xb4, 0xfd, 0xc9, 0xd9, 0x37, 0x8e, 0x95, 0xd8, 0xc9, + 0x55, 0x9e, 0x33, 0x01, 0x6e, 0x3a, 0x91, 0x40, 0xda, 0x4e, 0xb4, 0x33, 0x81, 0xff, 0x0c, 0x25, + 0xc1, 0xf7, 0x59, 0xc1, 0x55, 0xde, 0x85, 0xf0, 0xe0, 0x09, 0x6d, 0xcc, 0x2a, 0xe4, 0xed, 0x44, + 0x23, 0x3f, 0x34, 0xdc, 0x28, 0x3b, 0xcf, 0x55, 0x28, 0x22, 0x78, 0xbe, 0x9c, 0x07, 0xa5, 0x9d, + 0x87, 0x9c, 0xda, 0x72, 0x1e, 0x72, 0xf6, 0x59, 0x01, 0x9f, 0x8e, 0x0f, 0x4f, 0xc9, 0xb0, 0x0a, + 0x53, 0xe2, 0x44, 0x01, 0x1e, 0xdf, 0x95, 0x9a, 0x38, 0x6d, 0x78, 0x45, 0xee, 0x0a, 0xc3, 0x13, + 0xeb, 0x64, 0x4a, 0xea, 0x26, 0x6b, 0x19, 0x84, 0xfb, 0x72, 0x06, 0xa3, 0x38, 0x81, 0x47, 0x66, + 0x5f, 0x24, 0xf8, 0x29, 0xab, 0x74, 0x63, 0x75, 0x2b, 0x94, 0x3f, 0x8d, 0x02, 0x57, 0x0b, 0xbd, + 0x48, 0x00, 0xa8, 0xe0, 0x2f, 0x6e, 0x3b, 0x8c, 0x7a, 0xf0, 0x78, 0xd9, 0x61, 0xd4, 0xe3, 0x07, + 0x6c, 0x6b, 0x74, 0x3e, 0xe8, 0x86, 0x22, 0x48, 0xe0, 0x80, 0x8c, 0xa5, 0x46, 0xcf, 0xf1, 0x66, + 0x73, 0xfa, 0xbf, 0x9e, 0x19, 0x2f, 0xd5, 0xa9, 0x77, 0x1e, 0xfb, 0x12, 0xaa, 0xf7, 0x1e, 0x6a, + 0x4c, 0xa9, 0x33, 0xb8, 0xf9, 0x88, 0x61, 0xeb, 0x89, 0xb1, 0x0c, 0xe1, 0xa5, 0xc9, 0xfc, 0x0a, + 0xe4, 0x47, 0x8c, 0x75, 0x95, 0x08, 0x66, 0x32, 0xd2, 0x8e, 0x0f, 0x2f, 0xa8, 0x24, 0x43, 0xf0, + 0x9f, 0x48, 0xd5, 0xe5, 0x64, 0x92, 0x48, 0x0d, 0xc7, 0x54, 0xf3, 0x80, 0xf2, 0x3a, 0xdb, 0x6d, + 0x4d, 0x57, 0xff, 0xb1, 0xd7, 0x54, 0xf8, 0x10, 0xdb, 0xc4, 0x34, 0x5d, 0x28, 0x2f, 0x13, 0xd3, + 0x74, 0x6d, 0x62, 0x9a, 0x2e, 0xec, 0x2e, 0x13, 0xd3, 0x74, 0xf1, 0x9c, 0xfb, 0xf2, 0x97, 0xfe, + 0x1c, 0xcf, 0x61, 0xc7, 0xe4, 0xda, 0x4a, 0x7e, 0xc8, 0xb6, 0xed, 0xb2, 0xe9, 0x42, 0x89, 0xde, + 0xb9, 0x07, 0x36, 0x69, 0x7d, 0xa9, 0xa1, 0x62, 0x52, 0x60, 0x94, 0x4d, 0x1a, 0xf2, 0x7f, 0x0d, + 0x37, 0xaa, 0xe6, 0xb2, 0xad, 0xf4, 0x6e, 0xe2, 0xbb, 0xac, 0xd8, 0xed, 0x5d, 0x5e, 0x7f, 0xed, + 0x7f, 0xe9, 0x5f, 0x5e, 0xf7, 0x2b, 0xff, 0xf0, 0x22, 0xdb, 0x74, 0x91, 0x7c, 0x3f, 0xab, 0xe4, + 0x78, 0x99, 0xb1, 0x7e, 
0x67, 0x44, 0xf2, 0xea, 0xac, 0xb2, 0xb6, 0xa2, 0x3f, 0x55, 0xd6, 0xf9, + 0x36, 0x9e, 0x6f, 0xd7, 0xf9, 0x56, 0xc9, 0xb7, 0xde, 0xb0, 0x03, 0x2f, 0x9e, 0x35, 0xbc, 0x30, + 0x5e, 0xf8, 0x93, 0x50, 0x28, 0xd9, 0x88, 0xa4, 0xa6, 0xab, 0x51, 0x04, 0x41, 0xab, 0x94, 0xb9, + 0x18, 0x07, 0xe3, 0x71, 0x81, 0xae, 0xcb, 0xf7, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x90, 0xd6, + 0x84, 0x9a, 0x75, 0x05, 0x00, 0x00, } diff --git a/pb/flow.proto b/pb/flow.proto index 672bda5..34f0452 100644 --- a/pb/flow.proto +++ b/pb/flow.proto @@ -8,93 +8,88 @@ message FlowMessage { enum FlowType { FLOWUNKNOWN = 0; - NFV9 = 9; - IPFIX = 10; - SFLOW = 5; + SFLOW_5 = 1; + NETFLOW_V5 = 2; + NETFLOW_V9 = 3; + IPFIX = 4; } FlowType Type = 1; - uint64 TimeRecvd = 2; - uint64 SamplingRate = 3; + uint64 TimeReceived = 2; uint32 SequenceNum = 4; + uint64 SamplingRate = 3; - // Found inside packet (To be deprecated in favor of TimeFlowEnd) - uint64 TimeFlow = 5; + uint32 FlowDirection = 42; - // Source/destination addresses - bytes SrcIP = 6; - bytes DstIP = 7; - - // To be deprecated - enum IPType { - IPUNKNOWN = 0; - IPv4 = 4; - IPv6 = 6; - } - IPType IPversion = 8; + // Sampler information + bytes SamplerAddress = 11; + + // Found inside packet + uint64 TimeFlowStart = 38; + uint64 TimeFlowEnd = 5; // Size of the sampled packet uint64 Bytes = 9; uint64 Packets = 10; - // Routing information - bytes RouterAddr = 11; - bytes NextHop = 12; - uint32 NextHopAS = 13; - - // Autonomous system information - uint32 SrcAS = 14; - uint32 DstAS = 15; - - // Prefix size - uint32 SrcNet = 16; - uint32 DstNet = 17; + // Source/destination addresses + bytes SrcAddr = 6; + bytes DstAddr = 7; - // Interfaces - uint32 SrcIf = 18; - uint32 DstIf = 19; + // Layer 3 protocol (IPv4/IPv6/ARP/...) 
+ uint32 Etype = 30; // Layer 4 protocol uint32 Proto = 20; - // Port for UDP and TCP + // Ports for UDP and TCP uint32 SrcPort = 21; uint32 DstPort = 22; - // IP and TCP special flags - uint32 IPTos = 23; - uint32 ForwardingStatus = 24; - uint32 IPTTL = 25; - uint32 TCPFlags = 26; + // Interfaces + uint32 SrcIf = 18; + uint32 DstIf = 19; // Ethernet information uint64 SrcMac = 27; uint64 DstMac = 28; + // Vlan + uint32 SrcVlan = 33; + uint32 DstVlan = 34; // 802.1q VLAN in sampled packet uint32 VlanId = 29; - // Layer 3 protocol (IPv4/IPv6/ARP/...) - uint32 Etype = 30; + // VRF + uint32 IngressVrfID = 39; + uint32 EgressVrfID = 40; + // IP and TCP special flags + uint32 IPTos = 23; + uint32 ForwardingStatus = 24; + uint32 IPTTL = 25; + uint32 TCPFlags = 26; uint32 IcmpType = 31; uint32 IcmpCode = 32; - - // Vlan - uint32 SrcVlan = 33; - uint32 DstVlan = 34; - + uint32 IPv6FlowLabel = 37; // Fragments (IPv4/IPv6) uint32 FragmentId = 35; uint32 FragmentOffset = 36; + uint32 BiFlowDirection = 41; - uint32 IPv6FlowLabel = 37; + // Autonomous system information + uint32 SrcAS = 14; + uint32 DstAS = 15; - // VRF - uint32 IngressVrfId = 38; - uint32 EgressVrfId = 39; - - // time information, found inside packets - uint64 TimeFlowStart = 40; - uint64 TimeFlowEnd = 41; -} + bytes NextHop = 12; + uint32 NextHopAS = 13; + + // Prefix size + uint32 SrcNet = 16; + uint32 DstNet = 17; + + + // Custom fields: start after ID 1000: + // uint32 MyCustomField = 1000; + +} \ No newline at end of file diff --git a/producer/producer_nf.go b/producer/producer_nf.go index 8680615..18c7ec8 100644 --- a/producer/producer_nf.go +++ b/producer/producer_nf.go @@ -56,6 +56,17 @@ func (s *basicSamplingRateSystem) GetSamplingRate(version uint16, obsDomainId ui return 0, errors.New("") // TBC } +type SingleSamplingRateSystem struct { + Sampling uint32 +} + +func (s *SingleSamplingRateSystem) AddSamplingRate(version uint16, obsDomainId uint32, samplingRate uint32) { +} + +func (s 
*SingleSamplingRateSystem) GetSamplingRate(version uint16, obsDomainId uint32) (uint32, error) { + return s.Sampling, nil +} + func NetFlowLookFor(dataFields []netflow.DataField, typeId uint16) (bool, interface{}) { for _, dataField := range dataFields { if dataField.Type == typeId { @@ -130,7 +141,7 @@ func ConvertNetFlowDataSet(version uint16, baseTime uint32, uptime uint32, recor var time uint64 if version == 9 { - flowMessage.Type = flowmessage.FlowMessage_NFV9 + flowMessage.Type = flowmessage.FlowMessage_NETFLOW_V9 } else if version == 10 { flowMessage.Type = flowmessage.FlowMessage_IPFIX } @@ -186,12 +197,10 @@ func ConvertNetFlowDataSet(version uint16, baseTime uint32, uptime uint32, recor // IP case netflow.NFV9_FIELD_IPV4_SRC_ADDR: - flowMessage.IPversion = flowmessage.FlowMessage_IPv4 - flowMessage.SrcIP = v + flowMessage.SrcAddr = v flowMessage.Etype = 0x800 case netflow.NFV9_FIELD_IPV4_DST_ADDR: - flowMessage.IPversion = flowmessage.FlowMessage_IPv4 - flowMessage.DstIP = v + flowMessage.DstAddr = v flowMessage.Etype = 0x800 case netflow.NFV9_FIELD_SRC_MASK: @@ -200,12 +209,10 @@ func ConvertNetFlowDataSet(version uint16, baseTime uint32, uptime uint32, recor DecodeUNumber(v, &(flowMessage.DstNet)) case netflow.NFV9_FIELD_IPV6_SRC_ADDR: - flowMessage.IPversion = flowmessage.FlowMessage_IPv6 - flowMessage.SrcIP = v + flowMessage.SrcAddr = v flowMessage.Etype = 0x86dd case netflow.NFV9_FIELD_IPV6_DST_ADDR: - flowMessage.IPversion = flowmessage.FlowMessage_IPv6 - flowMessage.DstIP = v + flowMessage.DstAddr = v flowMessage.Etype = 0x86dd case netflow.NFV9_FIELD_IPV6_SRC_MASK: @@ -214,17 +221,13 @@ func ConvertNetFlowDataSet(version uint16, baseTime uint32, uptime uint32, recor DecodeUNumber(v, &(flowMessage.DstNet)) case netflow.NFV9_FIELD_IPV4_NEXT_HOP: - flowMessage.IPversion = flowmessage.FlowMessage_IPv4 flowMessage.NextHop = v case netflow.NFV9_FIELD_BGP_IPV4_NEXT_HOP: - flowMessage.IPversion = flowmessage.FlowMessage_IPv4 flowMessage.NextHop = v case 
netflow.NFV9_FIELD_IPV6_NEXT_HOP: - flowMessage.IPversion = flowmessage.FlowMessage_IPv6 flowMessage.NextHop = v case netflow.NFV9_FIELD_BGP_IPV6_NEXT_HOP: - flowMessage.IPversion = flowmessage.FlowMessage_IPv6 flowMessage.NextHop = v // ICMP @@ -260,9 +263,9 @@ func ConvertNetFlowDataSet(version uint16, baseTime uint32, uptime uint32, recor DecodeUNumber(v, &(flowMessage.DstVlan)) case netflow.IPFIX_FIELD_ingressVRFID: - DecodeUNumber(v, &(flowMessage.IngressVrfId)) + DecodeUNumber(v, &(flowMessage.IngressVrfID)) case netflow.IPFIX_FIELD_egressVRFID: - DecodeUNumber(v, &(flowMessage.EgressVrfId)) + DecodeUNumber(v, &(flowMessage.EgressVrfID)) case netflow.NFV9_FIELD_IPV4_IDENT: DecodeUNumber(v, &(flowMessage.FragmentId)) @@ -272,6 +275,12 @@ func ConvertNetFlowDataSet(version uint16, baseTime uint32, uptime uint32, recor case netflow.NFV9_FIELD_IPV6_FLOW_LABEL: DecodeUNumber(v, &(flowMessage.IPv6FlowLabel)) + case netflow.IPFIX_FIELD_biflowDirection: + DecodeUNumber(v, &(flowMessage.BiFlowDirection)) + + case netflow.NFV9_FIELD_DIRECTION: + DecodeUNumber(v, &(flowMessage.FlowDirection)) + default: if version == 9 { // NetFlow v9 time works with a differential based on router's uptime @@ -285,9 +294,7 @@ func ConvertNetFlowDataSet(version uint16, baseTime uint32, uptime uint32, recor var timeLastSwitched uint32 DecodeUNumber(v, &timeLastSwitched) timeDiff := (uptime - timeLastSwitched) / 1000 - timeFlowEnd := uint64(baseTime - timeDiff) - flowMessage.TimeFlow = timeFlowEnd // deprecate this - flowMessage.TimeFlowEnd = timeFlowEnd + flowMessage.TimeFlowEnd = uint64(baseTime - timeDiff) } } else if version == 10 { switch df.Type { @@ -303,22 +310,17 @@ func ConvertNetFlowDataSet(version uint16, baseTime uint32, uptime uint32, recor case netflow.IPFIX_FIELD_flowStartNanoseconds: DecodeUNumber(v, &time) flowMessage.TimeFlowStart = time / 1000000000 - case netflow.IPFIX_FIELD_flowEndSeconds: DecodeUNumber(v, &time) - flowMessage.TimeFlow = time // deprecate this 
flowMessage.TimeFlowEnd = time case netflow.IPFIX_FIELD_flowEndMilliseconds: DecodeUNumber(v, &time) - flowMessage.TimeFlow = time / 1000 // deprecate this flowMessage.TimeFlowEnd = time / 1000 case netflow.IPFIX_FIELD_flowEndMicroseconds: DecodeUNumber(v, &time) - flowMessage.TimeFlow = time / 1000000 // deprecate this flowMessage.TimeFlowEnd = time / 1000000 case netflow.IPFIX_FIELD_flowEndNanoseconds: DecodeUNumber(v, &time) - flowMessage.TimeFlow = time / 1000000000 // deprecate this flowMessage.TimeFlowEnd = time / 1000000000 } } @@ -466,7 +468,7 @@ func ProcessMessageNetFlow(msgDec interface{}, samplingRateSys SamplingRateSyste fmsg.SamplingRate = uint64(samplingRate) } default: - return flowMessageSet, errors.New("Bad NetFlow version") + return flowMessageSet, errors.New("Bad NetFlow/IPFIX version") } return flowMessageSet, nil diff --git a/producer/producer_nflegacy.go b/producer/producer_nflegacy.go new file mode 100644 index 0000000..591e297 --- /dev/null +++ b/producer/producer_nflegacy.go @@ -0,0 +1,78 @@ +package producer + +import ( + "encoding/binary" + "errors" + "github.com/cloudflare/goflow/decoders/netflowlegacy" + flowmessage "github.com/cloudflare/goflow/pb" + "net" +) + +func ConvertNetFlowLegacyRecord(baseTime uint32, uptime uint32, record netflowlegacy.RecordsNetFlowV5) *flowmessage.FlowMessage { + flowMessage := &flowmessage.FlowMessage{} + + flowMessage.Type = flowmessage.FlowMessage_NETFLOW_V5 + + timeDiffFirst := (uptime - record.First) / 1000 + timeDiffLast := (uptime - record.Last) / 1000 + flowMessage.TimeFlowStart = uint64(baseTime - timeDiffFirst) + flowMessage.TimeFlowEnd = uint64(baseTime - timeDiffLast) + + v := make(net.IP, 4) + binary.BigEndian.PutUint32(v, record.NextHop) + flowMessage.NextHop = v + v = make(net.IP, 4) + binary.BigEndian.PutUint32(v, record.SrcAddr) + flowMessage.SrcAddr = v + v = make(net.IP, 4) + binary.BigEndian.PutUint32(v, record.DstAddr) + flowMessage.DstAddr = v + + flowMessage.Etype = 0x800 + 
flowMessage.SrcAS = uint32(record.SrcAS) + flowMessage.DstAS = uint32(record.DstAS) + flowMessage.SrcNet = uint32(record.SrcMask) + flowMessage.DstNet = uint32(record.DstMask) + flowMessage.Proto = uint32(record.Proto) + flowMessage.TCPFlags = uint32(record.TCPFlags) + flowMessage.IPTos = uint32(record.Tos) + flowMessage.SrcIf = uint32(record.Input) + flowMessage.DstIf = uint32(record.Output) + flowMessage.SrcPort = uint32(record.SrcPort) + flowMessage.DstPort = uint32(record.DstPort) + flowMessage.Packets = uint64(record.DPkts) + flowMessage.Bytes = uint64(record.DOctets) + + return flowMessage +} + +func SearchNetFlowLegacyRecords(baseTime uint32, uptime uint32, dataRecords []netflowlegacy.RecordsNetFlowV5) []*flowmessage.FlowMessage { + flowMessageSet := make([]*flowmessage.FlowMessage, 0) + for _, record := range dataRecords { + fmsg := ConvertNetFlowLegacyRecord(baseTime, uptime, record) + if fmsg != nil { + flowMessageSet = append(flowMessageSet, fmsg) + } + } + return flowMessageSet +} + +func ProcessMessageNetFlowLegacy(msgDec interface{}) ([]*flowmessage.FlowMessage, error) { + switch packet := msgDec.(type) { + case netflowlegacy.PacketNetFlowV5: + seqnum := packet.FlowSequence + samplingRate := packet.SamplingInterval + baseTime := packet.UnixSecs + uptime := packet.SysUptime + + flowMessageSet := SearchNetFlowLegacyRecords(baseTime, uptime, packet.Records) + for _, fmsg := range flowMessageSet { + fmsg.SequenceNum = seqnum + fmsg.SamplingRate = uint64(samplingRate) + } + + return flowMessageSet, nil + default: + return []*flowmessage.FlowMessage{}, errors.New("Bad NetFlow v5 version") + } +} diff --git a/producer/producer_sf.go b/producer/producer_sf.go index 5e73426..8253688 100644 --- a/producer/producer_sf.go +++ b/producer/producer_sf.go @@ -56,8 +56,6 @@ func ParseSampledHeader(flowMessage *flowmessage.FlowMessage, sampledHeader *sfl (*flowMessage).Etype = uint32(binary.BigEndian.Uint16(etherType[0:2])) if etherType[0] == 0x8 && etherType[1] == 0x0 
{ // IPv4 - (*flowMessage).IPversion = flowmessage.FlowMessage_IPv4 - if len(data) >= offset+36 { nextHeader = data[offset+9] srcIP = data[offset+12 : offset+16] @@ -70,7 +68,6 @@ func ParseSampledHeader(flowMessage *flowmessage.FlowMessage, sampledHeader *sfl fragOffset = binary.BigEndian.Uint16(data[offset+6 : offset+8]) } } else if etherType[0] == 0x86 && etherType[1] == 0xdd { // IPv6 - (*flowMessage).IPversion = flowmessage.FlowMessage_IPv6 if len(data) >= offset+40 { nextHeader = data[offset+6] srcIP = data[offset+8 : offset+24] @@ -104,13 +101,13 @@ func ParseSampledHeader(flowMessage *flowmessage.FlowMessage, sampledHeader *sfl (*flowMessage).IcmpCode = uint32(dataTransport[1]) } - (*flowMessage).SrcIP = srcIP - (*flowMessage).DstIP = dstIP + (*flowMessage).SrcAddr = srcIP + (*flowMessage).DstAddr = dstIP (*flowMessage).Proto = uint32(nextHeader) (*flowMessage).IPTos = uint32(tos) (*flowMessage).IPTTL = uint32(ttl) (*flowMessage).TCPFlags = uint32(tcpflags) - + (*flowMessage).FragmentId = uint32(identification) (*flowMessage).FragmentOffset = uint32(fragOffset) } @@ -124,7 +121,7 @@ func SearchSFlowSamples(samples []interface{}) []*flowmessage.FlowMessage { var records []sflow.FlowRecord flowMessage := &flowmessage.FlowMessage{} - flowMessage.Type = flowmessage.FlowMessage_SFLOW + flowMessage.Type = flowmessage.FlowMessage_SFLOW_5 switch flowSample := flowSample.(type) { case sflow.FlowSample: @@ -151,9 +148,8 @@ func SearchSFlowSamples(samples []interface{}) []*flowmessage.FlowMessage { case sflow.SampledIPv4: ipSrc = recordData.Base.SrcIP ipDst = recordData.Base.DstIP - flowMessage.SrcIP = ipSrc - flowMessage.DstIP = ipDst - flowMessage.IPversion = flowmessage.FlowMessage_IPv4 + flowMessage.SrcAddr = ipSrc + flowMessage.DstAddr = ipDst flowMessage.Bytes = uint64(recordData.Base.Length) flowMessage.Proto = recordData.Base.Protocol flowMessage.SrcPort = recordData.Base.SrcPort @@ -163,9 +159,8 @@ func SearchSFlowSamples(samples []interface{}) 
[]*flowmessage.FlowMessage { case sflow.SampledIPv6: ipSrc = recordData.Base.SrcIP ipDst = recordData.Base.DstIP - flowMessage.IPversion = flowmessage.FlowMessage_IPv6 - flowMessage.SrcIP = ipSrc - flowMessage.DstIP = ipDst + flowMessage.SrcAddr = ipSrc + flowMessage.DstAddr = ipDst flowMessage.Bytes = uint64(recordData.Base.Length) flowMessage.Proto = recordData.Base.Protocol flowMessage.SrcPort = recordData.Base.SrcPort @@ -208,14 +203,12 @@ func ProcessMessageSFlow(msgDec interface{}) ([]*flowmessage.FlowMessage, error) flowSamples := GetSFlowFlowSamples(&packet) flowMessageSet := SearchSFlowSamples(flowSamples) for _, fmsg := range flowMessageSet { - fmsg.RouterAddr = agent + fmsg.SamplerAddress = agent fmsg.SequenceNum = seqnum } return flowMessageSet, nil default: - return []*flowmessage.FlowMessage{}, errors.New("Bad SFlow version") + return []*flowmessage.FlowMessage{}, errors.New("Bad sFlow version") } - - return []*flowmessage.FlowMessage{}, nil } diff --git a/producer/producer_test.go b/producer/producer_test.go new file mode 100644 index 0000000..f584ba9 --- /dev/null +++ b/producer/producer_test.go @@ -0,0 +1,77 @@ +package producer + +import ( + "github.com/cloudflare/goflow/decoders/netflow" + "github.com/cloudflare/goflow/decoders/sflow" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestProcessMessageNetFlow(t *testing.T) { + records := []netflow.DataRecord{ + netflow.DataRecord{ + Values: []netflow.DataField{ + netflow.DataField{ + Type: netflow.NFV9_FIELD_IPV4_SRC_ADDR, + Value: []byte{10, 0, 0, 1}, + }, + }, + }, + } + dfs := []interface{}{ + netflow.DataFlowSet{ + Records: records, + }, + } + + pktnf9 := netflow.NFv9Packet{ + FlowSets: dfs, + } + testsr := &SingleSamplingRateSystem{1} + _, err := ProcessMessageNetFlow(pktnf9, testsr) + assert.Nil(t, err) + + pktipfix := netflow.IPFIXPacket{ + FlowSets: dfs, + } + _, err = ProcessMessageNetFlow(pktipfix, testsr) + assert.Nil(t, err) +} + +func TestProcessMessageSFlow(t *testing.T) 
{ + sh := sflow.SampledHeader{ + FrameLength: 10, + Protocol: 1, + HeaderData: []byte{ + 0xff, 0xab, 0xcd, 0xef, 0xab, 0xcd, 0xff, 0xab, 0xcd, 0xef, 0xab, 0xbc, 0x86, 0xdd, 0x60, 0x2e, + 0xc4, 0xec, 0x01, 0xcc, 0x06, 0x40, 0xfd, 0x01, 0x00, 0x00, 0xff, 0x01, 0x82, 0x10, 0xcd, 0xff, + 0xff, 0x1c, 0x00, 0x00, 0x01, 0x50, 0xfd, 0x01, 0x00, 0x00, 0xff, 0x01, 0x00, 0x01, 0x02, 0xff, + 0xff, 0x93, 0x00, 0x00, 0x02, 0x46, 0xcf, 0xca, 0x00, 0x50, 0x05, 0x15, 0x21, 0x6f, 0xa4, 0x9c, + 0xf4, 0x59, 0x80, 0x18, 0x08, 0x09, 0x8c, 0x86, 0x00, 0x00, 0x01, 0x01, 0x08, 0x0a, 0x2a, 0x85, + 0xee, 0x9e, 0x64, 0x5c, 0x27, 0x28, + }, + } + pkt := sflow.Packet{ + Version: 5, + Samples: []interface{}{ + sflow.FlowSample{ + SamplingRate: 1, + Records: []sflow.FlowRecord{ + sflow.FlowRecord{ + Data: sh, + }, + }, + }, + sflow.ExpandedFlowSample{ + SamplingRate: 1, + Records: []sflow.FlowRecord{ + sflow.FlowRecord{ + Data: sh, + }, + }, + }, + }, + } + _, err := ProcessMessageSFlow(pkt) + assert.Nil(t, err) +} diff --git a/transport/kafka.go b/transport/kafka.go index bc09e65..974fb23 100644 --- a/transport/kafka.go +++ b/transport/kafka.go @@ -3,60 +3,156 @@ package transport import ( "crypto/tls" "crypto/x509" - log "github.com/Sirupsen/logrus" + "fmt" flowmessage "github.com/cloudflare/goflow/pb" + "github.com/cloudflare/goflow/utils" proto "github.com/golang/protobuf/proto" + //"github.com/golang/protobuf/descriptor" + "errors" + "flag" sarama "github.com/Shopify/sarama" "os" + "reflect" + "strings" +) + +var ( + KafkaTLS *bool + KafkaSASL *bool + KafkaTopic *string + KafkaSrv *string + KafkaBrk *string + + KafkaLogErrors *bool + + KafkaHashing *bool + KafkaKeying *string ) type KafkaState struct { producer sarama.AsyncProducer topic string + hashing bool + keying []string +} + +func RegisterFlags() { + KafkaTLS = flag.Bool("kafka.tls", false, "Use TLS to connect to Kafka") + KafkaSASL = flag.Bool("kafka.sasl", false, "Use SASL/PLAIN data to connect to Kafka (TLS is recommended and the 
environment variables KAFKA_SASL_USER and KAFKA_SASL_PASS need to be set)") + KafkaTopic = flag.String("kafka.topic", "flow-messages", "Kafka topic to produce to") + KafkaSrv = flag.String("kafka.srv", "", "SRV record containing a list of Kafka brokers (or use kafka.out.brokers)") + KafkaBrk = flag.String("kafka.brokers", "127.0.0.1:9092,[::1]:9092", "Kafka brokers list separated by commas") + + KafkaLogErrors = flag.Bool("kafka.log.err", false, "Log Kafka errors") + + KafkaHashing = flag.Bool("kafka.hashing", false, "Enable partitioning by hash instead of random") + KafkaKeying = flag.String("kafka.key", "SamplerAddr,DstAS", "Kafka list of fields to do hashing on (partition) separated by commas") +} + +func StartKafkaProducerFromArgs(log utils.Logger) (*KafkaState, error) { + addrs := make([]string, 0) + if *KafkaSrv != "" { + addrs, _ = utils.GetServiceAddresses(*KafkaSrv) + } else { + addrs = strings.Split(*KafkaBrk, ",") + } + return StartKafkaProducer(addrs, *KafkaTopic, *KafkaHashing, *KafkaKeying, *KafkaTLS, *KafkaSASL, *KafkaLogErrors, log) } -func StartKafkaProducer(addrs []string, topic string, use_tls bool, use_sasl bool) *KafkaState { +func StartKafkaProducer(addrs []string, topic string, hashing bool, keying string, useTls bool, useSasl bool, logErrors bool, log utils.Logger) (*KafkaState, error) { kafkaConfig := sarama.NewConfig() kafkaConfig.Producer.Return.Successes = false - kafkaConfig.Producer.Return.Errors = false - if use_tls { + kafkaConfig.Producer.Return.Errors = logErrors + if useTls { rootCAs, err := x509.SystemCertPool() if err != nil { - log.Fatalf("Error initializing TLS: %v", err) + return nil, errors.New(fmt.Sprintf("Error initializing TLS: %v", err)) } kafkaConfig.Net.TLS.Enable = true kafkaConfig.Net.TLS.Config = &tls.Config{RootCAs: rootCAs} } - if use_sasl { - if !use_tls { - log.Warnln("Using SASL without TLS will transmit the authentication in plaintext!") + + var keyingSplit []string + if hashing { + 
kafkaConfig.Producer.Partitioner = sarama.NewHashPartitioner + keyingSplit = strings.Split(keying, ",") + } + + if useSasl { + if !useTls && log != nil { + log.Warn("Using SASL without TLS will transmit the authentication in plaintext!") } kafkaConfig.Net.SASL.Enable = true kafkaConfig.Net.SASL.User = os.Getenv("KAFKA_SASL_USER") kafkaConfig.Net.SASL.Password = os.Getenv("KAFKA_SASL_PASS") if kafkaConfig.Net.SASL.User == "" && kafkaConfig.Net.SASL.Password == "" { - log.Fatalf("Kafka SASL config from environment was unsuccessful. KAFKA_SASL_USER and KAFKA_SASL_PASS need to be set.") - } else { + return nil, errors.New("Kafka SASL config from environment was unsuccessful. KAFKA_SASL_USER and KAFKA_SASL_PASS need to be set.") + } else if log != nil { log.Infof("Authenticating as user '%s'...", kafkaConfig.Net.SASL.User) } } kafkaProducer, err := sarama.NewAsyncProducer(addrs, kafkaConfig) if err != nil { - log.Fatalf("%v", err) + return nil, err } state := KafkaState{ producer: kafkaProducer, topic: topic, + hashing: hashing, + keying: keyingSplit, } - return &state + if logErrors { + go func() { + for { + select { + case msg := <-kafkaProducer.Errors(): + if log != nil { + log.Error(msg) + } + } + } + }() + } + + return &state, nil +} + +func HashProto(fields []string, flowMessage *flowmessage.FlowMessage) string { + var keyStr string + + if flowMessage != nil { + vfm := reflect.ValueOf(flowMessage) + vfm = reflect.Indirect(vfm) + + for _, kf := range fields { + fieldValue := vfm.FieldByName(kf) + if fieldValue.IsValid() { + keyStr += fmt.Sprintf("%v-", fieldValue) + } + } + } + + return keyStr } func (s KafkaState) SendKafkaFlowMessage(flowMessage *flowmessage.FlowMessage) { + var key sarama.Encoder + if s.hashing { + keyStr := HashProto(s.keying, flowMessage) + key = sarama.StringEncoder(keyStr) + } b, _ := proto.Marshal(flowMessage) s.producer.Input() <- &sarama.ProducerMessage{ Topic: s.topic, + Key: key, Value: sarama.ByteEncoder(b), } } + +func (s KafkaState) 
Publish(msgs []*flowmessage.FlowMessage) { + for _, msg := range msgs { + s.SendKafkaFlowMessage(msg) + } +} diff --git a/transport/transport_test.go b/transport/transport_test.go new file mode 100644 index 0000000..0791e0a --- /dev/null +++ b/transport/transport_test.go @@ -0,0 +1,15 @@ +package transport + +import ( + flowmessage "github.com/cloudflare/goflow/pb" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestHash(t *testing.T) { + msg := &flowmessage.FlowMessage{ + SamplerAddress: []byte{10, 0, 0, 1}, + } + key := HashProto([]string{"SamplerAddress", "InvalidField"}, msg) + assert.Equal(t, "[10 0 0 1]-", key, "The two keys should be the same.") +} diff --git a/metrics.go b/utils/metrics.go similarity index 93% rename from metrics.go rename to utils/metrics.go index 54722a8..216f013 100644 --- a/metrics.go +++ b/utils/metrics.go @@ -1,7 +1,9 @@ -package main +package utils import ( "github.com/prometheus/client_golang/prometheus" + "strconv" + "time" ) var ( @@ -130,7 +132,7 @@ var ( ) ) -func initMetrics() { +func init() { prometheus.MustRegister(MetricTrafficBytes) prometheus.MustRegister(MetricTrafficPackets) prometheus.MustRegister(MetricPacketSizeSum) @@ -152,3 +154,17 @@ func initMetrics() { prometheus.MustRegister(SFlowSampleStatsSum) prometheus.MustRegister(SFlowSampleRecordsStatsSum) } + +func DefaultAccountCallback(name string, id int, start, end time.Time) { + DecoderProcessTime.With( + prometheus.Labels{ + "name": name, + }). + Observe(float64((end.Sub(start)).Nanoseconds()) / 1000) + DecoderStats.With( + prometheus.Labels{ + "worker": strconv.Itoa(id), + "name": name, + }). 
+ Inc() +} diff --git a/utils/netflow.go b/utils/netflow.go new file mode 100644 index 0000000..a24a1d7 --- /dev/null +++ b/utils/netflow.go @@ -0,0 +1,349 @@ +package utils + +import ( + "bytes" + "encoding/json" + "github.com/cloudflare/goflow/decoders/netflow" + flowmessage "github.com/cloudflare/goflow/pb" + "github.com/cloudflare/goflow/producer" + "github.com/prometheus/client_golang/prometheus" + "net/http" + "strconv" + "sync" + "time" +) + +type TemplateSystem struct { + key string + templates *netflow.BasicTemplateSystem +} + +func (s *TemplateSystem) AddTemplate(version uint16, obsDomainId uint32, template interface{}) { + s.templates.AddTemplate(version, obsDomainId, template) + + typeStr := "options_template" + var templateId uint16 + switch templateIdConv := template.(type) { + case netflow.IPFIXOptionsTemplateRecord: + templateId = templateIdConv.TemplateId + case netflow.NFv9OptionsTemplateRecord: + templateId = templateIdConv.TemplateId + case netflow.TemplateRecord: + templateId = templateIdConv.TemplateId + typeStr = "template" + } + NetFlowTemplatesStats.With( + prometheus.Labels{ + "router": s.key, + "version": strconv.Itoa(int(version)), + "obs_domain_id": strconv.Itoa(int(obsDomainId)), + "template_id": strconv.Itoa(int(templateId)), + "type": typeStr, + }). 
+ Inc() +} + +func (s *TemplateSystem) GetTemplate(version uint16, obsDomainId uint32, templateId uint16) (interface{}, error) { + return s.templates.GetTemplate(version, obsDomainId, templateId) +} + +type StateNetFlow struct { + Transport Transport + Logger Logger + templateslock *sync.RWMutex + templates map[string]*TemplateSystem + + samplinglock *sync.RWMutex + sampling map[string]producer.SamplingRateSystem +} + +func (s *StateNetFlow) DecodeFlow(msg interface{}) error { + pkt := msg.(BaseMessage) + buf := bytes.NewBuffer(pkt.Payload) + + key := pkt.Src.String() + samplerAddress := pkt.Src + if samplerAddress.To4() != nil { + samplerAddress = samplerAddress.To4() + } + + s.templateslock.RLock() + templates, ok := s.templates[key] + if !ok { + templates = &TemplateSystem{ + templates: netflow.CreateTemplateSystem(), + key: key, + } + s.templates[key] = templates + } + s.templateslock.RUnlock() + s.samplinglock.RLock() + sampling, ok := s.sampling[key] + if !ok { + sampling = producer.CreateSamplingSystem() + s.sampling[key] = sampling + } + s.samplinglock.RUnlock() + + timeTrackStart := time.Now() + msgDec, err := netflow.DecodeMessage(buf, templates) + if err != nil { + switch err.(type) { + case *netflow.ErrorVersion: + NetFlowErrors.With( + prometheus.Labels{ + "router": key, + "error": "error_version", + }). + Inc() + case *netflow.ErrorFlowId: + NetFlowErrors.With( + prometheus.Labels{ + "router": key, + "error": "error_flow_id", + }). + Inc() + case *netflow.ErrorTemplateNotFound: + NetFlowErrors.With( + prometheus.Labels{ + "router": key, + "error": "template_not_found", + }). + Inc() + default: + NetFlowErrors.With( + prometheus.Labels{ + "router": key, + "error": "error_decoding", + }). + Inc() + } + return err + } + + flowMessageSet := make([]*flowmessage.FlowMessage, 0) + + switch msgDecConv := msgDec.(type) { + case netflow.NFv9Packet: + NetFlowStats.With( + prometheus.Labels{ + "router": key, + "version": "9", + }). 
+ Inc() + + for _, fs := range msgDecConv.FlowSets { + switch fsConv := fs.(type) { + case netflow.TemplateFlowSet: + NetFlowSetStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "9", + "type": "TemplateFlowSet", + }). + Inc() + + NetFlowSetRecordsStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "9", + "type": "OptionsTemplateFlowSet", + }). + Add(float64(len(fsConv.Records))) + + case netflow.NFv9OptionsTemplateFlowSet: + NetFlowSetStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "9", + "type": "OptionsTemplateFlowSet", + }). + Inc() + + NetFlowSetRecordsStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "9", + "type": "OptionsTemplateFlowSet", + }). + Add(float64(len(fsConv.Records))) + + case netflow.OptionsDataFlowSet: + NetFlowSetStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "9", + "type": "OptionsDataFlowSet", + }). + Inc() + + NetFlowSetRecordsStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "9", + "type": "OptionsDataFlowSet", + }). + Add(float64(len(fsConv.Records))) + case netflow.DataFlowSet: + NetFlowSetStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "9", + "type": "DataFlowSet", + }). + Inc() + + NetFlowSetRecordsStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "9", + "type": "DataFlowSet", + }). + Add(float64(len(fsConv.Records))) + } + } + flowMessageSet, err = producer.ProcessMessageNetFlow(msgDecConv, sampling) + + for _, fmsg := range flowMessageSet { + fmsg.TimeReceived = uint64(time.Now().UTC().Unix()) + fmsg.SamplerAddress = samplerAddress + timeDiff := fmsg.TimeReceived - fmsg.TimeFlowEnd + NetFlowTimeStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "9", + }). + Observe(float64(timeDiff)) + } + case netflow.IPFIXPacket: + NetFlowStats.With( + prometheus.Labels{ + "router": key, + "version": "10", + }). 
+ Inc() + + for _, fs := range msgDecConv.FlowSets { + switch fsConv := fs.(type) { + case netflow.TemplateFlowSet: + NetFlowSetStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "10", + "type": "TemplateFlowSet", + }). + Inc() + + NetFlowSetRecordsStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "10", + "type": "TemplateFlowSet", + }). + Add(float64(len(fsConv.Records))) + + case netflow.IPFIXOptionsTemplateFlowSet: + NetFlowSetStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "10", + "type": "OptionsTemplateFlowSet", + }). + Inc() + + NetFlowSetRecordsStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "10", + "type": "OptionsTemplateFlowSet", + }). + Add(float64(len(fsConv.Records))) + + case netflow.OptionsDataFlowSet: + + NetFlowSetStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "10", + "type": "OptionsDataFlowSet", + }). + Inc() + + NetFlowSetRecordsStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "10", + "type": "OptionsDataFlowSet", + }). + Add(float64(len(fsConv.Records))) + + case netflow.DataFlowSet: + NetFlowSetStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "10", + "type": "DataFlowSet", + }). + Inc() + + NetFlowSetRecordsStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "10", + "type": "DataFlowSet", + }). + Add(float64(len(fsConv.Records))) + } + } + flowMessageSet, err = producer.ProcessMessageNetFlow(msgDecConv, sampling) + + for _, fmsg := range flowMessageSet { + fmsg.TimeReceived = uint64(time.Now().UTC().Unix()) + fmsg.SamplerAddress = samplerAddress + timeDiff := fmsg.TimeReceived - fmsg.TimeFlowEnd + NetFlowTimeStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "10", + }). + Observe(float64(timeDiff)) + } + } + + timeTrackStop := time.Now() + DecoderTime.With( + prometheus.Labels{ + "name": "NetFlow", + }). 
+ Observe(float64((timeTrackStop.Sub(timeTrackStart)).Nanoseconds()) / 1000) + + if s.Transport != nil { + s.Transport.Publish(flowMessageSet) + } + + return nil +} + +func (s *StateNetFlow) ServeHTTPTemplates(w http.ResponseWriter, r *http.Request) { + tmp := make(map[string]map[uint16]map[uint32]map[uint16]interface{}) + s.templateslock.RLock() + for key, templatesrouterstr := range s.templates { + templatesrouter := templatesrouterstr.templates.GetTemplates() + tmp[key] = templatesrouter + } + s.templateslock.RUnlock() + enc := json.NewEncoder(w) + enc.Encode(tmp) +} + +func (s *StateNetFlow) InitTemplates() { + s.templates = make(map[string]*TemplateSystem) + s.templateslock = &sync.RWMutex{} + s.sampling = make(map[string]producer.SamplingRateSystem) + s.samplinglock = &sync.RWMutex{} +} + +func (s *StateNetFlow) FlowRoutine(workers int, addr string, port int) error { + s.InitTemplates() + return UDPRoutine("NetFlow", s.DecodeFlow, workers, addr, port, false, s.Logger) +} diff --git a/utils/nflegacy.go b/utils/nflegacy.go new file mode 100644 index 0000000..ae5243f --- /dev/null +++ b/utils/nflegacy.go @@ -0,0 +1,76 @@ +package utils + +import ( + "bytes" + "github.com/cloudflare/goflow/decoders/netflowlegacy" + flowmessage "github.com/cloudflare/goflow/pb" + "github.com/cloudflare/goflow/producer" + "github.com/prometheus/client_golang/prometheus" + "time" +) + +type StateNFLegacy struct { + Transport Transport + Logger Logger +} + +func (s *StateNFLegacy) DecodeFlow(msg interface{}) error { + pkt := msg.(BaseMessage) + buf := bytes.NewBuffer(pkt.Payload) + key := pkt.Src.String() + samplerAddress := pkt.Src + if samplerAddress.To4() != nil { + samplerAddress = samplerAddress.To4() + } + + timeTrackStart := time.Now() + msgDec, err := netflowlegacy.DecodeMessage(buf) + + if err != nil { + switch err.(type) { + case *netflowlegacy.ErrorVersion: + NetFlowErrors.With( + prometheus.Labels{ + "router": key, + "error": "error_version", + }). 
+ Inc() + } + return err + } + + switch msgDec.(type) { + case netflowlegacy.PacketNetFlowV5: + NetFlowStats.With( + prometheus.Labels{ + "router": key, + "version": "5", + }). + Inc() + } + + var flowMessageSet []*flowmessage.FlowMessage + flowMessageSet, err = producer.ProcessMessageNetFlowLegacy(msgDec) + + timeTrackStop := time.Now() + DecoderTime.With( + prometheus.Labels{ + "name": "NetFlowV5", + }). + Observe(float64((timeTrackStop.Sub(timeTrackStart)).Nanoseconds()) / 1000) + + for _, fmsg := range flowMessageSet { + fmsg.TimeReceived = uint64(time.Now().UTC().Unix()) + fmsg.SamplerAddress = samplerAddress + } + + if s.Transport != nil { + s.Transport.Publish(flowMessageSet) + } + + return nil +} + +func (s *StateNFLegacy) FlowRoutine(workers int, addr string, port int) error { + return UDPRoutine("NetFlowV5", s.DecodeFlow, workers, addr, port, false, s.Logger) +} diff --git a/utils/sflow.go b/utils/sflow.go new file mode 100644 index 0000000..632045e --- /dev/null +++ b/utils/sflow.go @@ -0,0 +1,135 @@ +package utils + +import ( + "bytes" + "github.com/cloudflare/goflow/decoders/sflow" + flowmessage "github.com/cloudflare/goflow/pb" + "github.com/cloudflare/goflow/producer" + "github.com/prometheus/client_golang/prometheus" + "net" + "time" +) + +type StateSFlow struct { + Transport Transport + Logger Logger +} + +func (s *StateSFlow) DecodeFlow(msg interface{}) error { + pkt := msg.(BaseMessage) + buf := bytes.NewBuffer(pkt.Payload) + key := pkt.Src.String() + + timeTrackStart := time.Now() + msgDec, err := sflow.DecodeMessage(buf) + + if err != nil { + switch err.(type) { + case *sflow.ErrorVersion: + SFlowErrors.With( + prometheus.Labels{ + "router": key, + "error": "error_version", + }). + Inc() + case *sflow.ErrorIPVersion: + SFlowErrors.With( + prometheus.Labels{ + "router": key, + "error": "error_ip_version", + }). + Inc() + case *sflow.ErrorDataFormat: + SFlowErrors.With( + prometheus.Labels{ + "router": key, + "error": "error_data_format", + }). 
+ Inc() + default: + SFlowErrors.With( + prometheus.Labels{ + "router": key, + "error": "error_decoding", + }). + Inc() + } + return err + } + + switch msgDecConv := msgDec.(type) { + case sflow.Packet: + agentStr := net.IP(msgDecConv.AgentIP).String() + SFlowStats.With( + prometheus.Labels{ + "router": key, + "agent": agentStr, + "version": "5", + }). + Inc() + + for _, samples := range msgDecConv.Samples { + typeStr := "unknown" + countRec := 0 + switch samplesConv := samples.(type) { + case sflow.FlowSample: + typeStr = "FlowSample" + countRec = len(samplesConv.Records) + case sflow.CounterSample: + typeStr = "CounterSample" + if samplesConv.Header.Format == 4 { + typeStr = "Expanded" + typeStr + } + countRec = len(samplesConv.Records) + case sflow.ExpandedFlowSample: + typeStr = "ExpandedFlowSample" + countRec = len(samplesConv.Records) + } + SFlowSampleStatsSum.With( + prometheus.Labels{ + "router": key, + "agent": agentStr, + "version": "5", + "type": typeStr, + }). + Inc() + + SFlowSampleRecordsStatsSum.With( + prometheus.Labels{ + "router": key, + "agent": agentStr, + "version": "5", + "type": typeStr, + }). + Add(float64(countRec)) + } + + } + + var flowMessageSet []*flowmessage.FlowMessage + flowMessageSet, err = producer.ProcessMessageSFlow(msgDec) + + timeTrackStop := time.Now() + DecoderTime.With( + prometheus.Labels{ + "name": "sFlow", + }). 
+ Observe(float64((timeTrackStop.Sub(timeTrackStart)).Nanoseconds()) / 1000) + + ts := uint64(time.Now().UTC().Unix()) + for _, fmsg := range flowMessageSet { + fmsg.TimeReceived = ts + fmsg.TimeFlowStart = ts + fmsg.TimeFlowEnd = ts + } + + if s.Transport != nil { + s.Transport.Publish(flowMessageSet) + } + + return nil +} + +func (s *StateSFlow) FlowRoutine(workers int, addr string, port int) error { + return UDPRoutine("sFlow", s.DecodeFlow, workers, addr, port, false, s.Logger) +} diff --git a/utils/utils.go b/utils/utils.go new file mode 100644 index 0000000..9483e2e --- /dev/null +++ b/utils/utils.go @@ -0,0 +1,217 @@ +package utils + +import ( + "encoding/binary" + "errors" + "fmt" + "github.com/cloudflare/goflow/decoders" + "github.com/cloudflare/goflow/decoders/netflow" + flowmessage "github.com/cloudflare/goflow/pb" + reuseport "github.com/libp2p/go-reuseport" + "github.com/prometheus/client_golang/prometheus" + "net" + "strconv" + "time" +) + +func GetServiceAddresses(srv string) (addrs []string, err error) { + _, srvs, err := net.LookupSRV("", "", srv) + if err != nil { + return nil, errors.New(fmt.Sprintf("Service discovery: %v\n", err)) + } + for _, srv := range srvs { + addrs = append(addrs, net.JoinHostPort(srv.Target, strconv.Itoa(int(srv.Port)))) + } + return addrs, nil +} + +type Logger interface { + Printf(string, ...interface{}) + Errorf(string, ...interface{}) + Warnf(string, ...interface{}) + Warn(...interface{}) + Error(...interface{}) + Debug(...interface{}) + Debugf(string, ...interface{}) + Infof(string, ...interface{}) + Fatalf(string, ...interface{}) +} + +type BaseMessage struct { + Src net.IP + Port int + Payload []byte +} + +type Transport interface { + Publish([]*flowmessage.FlowMessage) +} + +type DefaultLogTransport struct { +} + +func (s *DefaultLogTransport) Publish(msgs []*flowmessage.FlowMessage) { + for _, msg := range msgs { + fmt.Printf("%v\n", FlowMessageToString(msg)) + } +} + +type DefaultJSONTransport struct { +} + 
+func (s *DefaultJSONTransport) Publish(msgs []*flowmessage.FlowMessage) { + for _, msg := range msgs { + fmt.Printf("%v\n", FlowMessageToJSON(msg)) + } +} + +type DefaultErrorCallback struct { + Logger Logger +} + +func (cb *DefaultErrorCallback) Callback(name string, id int, start, end time.Time, err error) { + if _, ok := err.(*netflow.ErrorTemplateNotFound); ok { + return + } + if cb.Logger != nil { + cb.Logger.Errorf("Error from: %v (%v) duration: %v. %v", name, id, end.Sub(start), err) + } +} + +func FlowMessageToString(fmsg *flowmessage.FlowMessage) string { + srcmac := make([]byte, 8) + dstmac := make([]byte, 8) + binary.BigEndian.PutUint64(srcmac, fmsg.SrcMac) + binary.BigEndian.PutUint64(dstmac, fmsg.DstMac) + srcmac = srcmac[2:8] + dstmac = dstmac[2:8] + + s := fmt.Sprintf("Type:%v TimeReceived:%v SequenceNum:%v SamplingRate:%v "+ + "SamplerAddress:%v TimeFlowStart:%v TimeFlowEnd:%v Bytes:%v Packets:%v SrcAddr:%v "+ + "DstAddr:%v Etype:%v Proto:%v SrcPort:%v DstPort:%v SrcIf:%v DstIf:%v SrcMac:%v "+ + "DstMac:%v SrcVlan:%v DstVlan:%v VlanId:%v IngressVrfID:%v EgressVrfID:%v IPTos:%v "+ + "ForwardingStatus:%v IPTTL:%v TCPFlags:%v IcmpType:%v IcmpCode:%v IPv6FlowLabel:%v "+ + "FragmentId:%v FragmentOffset:%v BiFlowDirection:%v SrcAS:%v DstAS:%v NextHop:%v NextHopAS:%v SrcNet:%v DstNet:%v", + fmsg.Type, fmsg.TimeReceived, fmsg.SequenceNum, fmsg.SamplingRate, net.IP(fmsg.SamplerAddress), + fmsg.TimeFlowStart, fmsg.TimeFlowEnd, fmsg.Bytes, fmsg.Packets, net.IP(fmsg.SrcAddr), net.IP(fmsg.DstAddr), + fmsg.Etype, fmsg.Proto, fmsg.SrcPort, fmsg.DstPort, fmsg.SrcIf, fmsg.DstIf, net.HardwareAddr(srcmac), + net.HardwareAddr(dstmac), fmsg.SrcVlan, fmsg.DstVlan, fmsg.VlanId, fmsg.IngressVrfID, + fmsg.EgressVrfID, fmsg.IPTos, fmsg.ForwardingStatus, fmsg.IPTTL, fmsg.TCPFlags, fmsg.IcmpType, + fmsg.IcmpCode, fmsg.IPv6FlowLabel, fmsg.FragmentId, fmsg.FragmentOffset, fmsg.BiFlowDirection, fmsg.SrcAS, fmsg.DstAS, + net.IP(fmsg.NextHop), fmsg.NextHopAS, fmsg.SrcNet, 
fmsg.DstNet) + return s +} + +func FlowMessageToJSON(fmsg *flowmessage.FlowMessage) string { + srcmac := make([]byte, 8) + dstmac := make([]byte, 8) + binary.BigEndian.PutUint64(srcmac, fmsg.SrcMac) + binary.BigEndian.PutUint64(dstmac, fmsg.DstMac) + srcmac = srcmac[2:8] + dstmac = dstmac[2:8] + + s := fmt.Sprintf("{\"Type\":\"%v\",\"TimeReceived\":%v,\"SequenceNum\":%v,\"SamplingRate\":%v,"+ + "\"SamplerAddress\":\"%v\",\"TimeFlowStart\":%v,\"TimeFlowEnd\":%v,\"Bytes\":%v,\"Packets\":%v,\"SrcAddr\":\"%v\","+ + "\"DstAddr\":\"%v\",\"Etype\":%v,\"Proto\":%v,\"SrcPort\":%v,\"DstPort\":%v,\"SrcIf\":%v,\"DstIf\":%v,\"SrcMac\":\"%v\","+ + "\"DstMac\":\"%v\",\"SrcVlan\":%v,\"DstVlan\":%v,\"VlanId\":%v,\"IngressVrfID\":%v,\"EgressVrfID\":%v,\"IPTos\":%v,"+ + "\"ForwardingStatus\":%v,\"IPTTL\":%v,\"TCPFlags\":%v,\"IcmpType\":%v,\"IcmpCode\":%v,\"IPv6FlowLabel\":%v,"+ + "\"FragmentId\":%v,\"FragmentOffset\":%v,\"BiFlowDirection\":%v,\"SrcAS\":%v,\"DstAS\":%v,\"NextHop\":\"%v\",\"NextHopAS\":%v,\"SrcNet\":%v,\"DstNet\":%v}", + fmsg.Type, fmsg.TimeReceived, fmsg.SequenceNum, fmsg.SamplingRate, net.IP(fmsg.SamplerAddress), + fmsg.TimeFlowStart, fmsg.TimeFlowEnd, fmsg.Bytes, fmsg.Packets, net.IP(fmsg.SrcAddr), net.IP(fmsg.DstAddr), + fmsg.Etype, fmsg.Proto, fmsg.SrcPort, fmsg.DstPort, fmsg.SrcIf, fmsg.DstIf, net.HardwareAddr(srcmac), + net.HardwareAddr(dstmac), fmsg.SrcVlan, fmsg.DstVlan, fmsg.VlanId, fmsg.IngressVrfID, + fmsg.EgressVrfID, fmsg.IPTos, fmsg.ForwardingStatus, fmsg.IPTTL, fmsg.TCPFlags, fmsg.IcmpType, + fmsg.IcmpCode, fmsg.IPv6FlowLabel, fmsg.FragmentId, fmsg.FragmentOffset, fmsg.BiFlowDirection, fmsg.SrcAS, fmsg.DstAS, + net.IP(fmsg.NextHop), fmsg.NextHopAS, fmsg.SrcNet, fmsg.DstNet) + return s +} + +func UDPRoutine(name string, decodeFunc decoder.DecoderFunc, workers int, addr string, port int, sockReuse bool, logger Logger) error { + ecb := DefaultErrorCallback{ + Logger: logger, + } + + decoderParams := decoder.DecoderParams{ + DecoderFunc: decodeFunc, + 
DoneCallback: DefaultAccountCallback, + ErrorCallback: ecb.Callback, + } + + processor := decoder.CreateProcessor(workers, decoderParams, name) + processor.Start() + + addrUDP := net.UDPAddr{ + IP: net.ParseIP(addr), + Port: port, + } + + var udpconn *net.UDPConn + var err error + + if sockReuse { + pconn, err := reuseport.ListenPacket("udp", addrUDP.String()) + if err != nil { + return err + } + defer pconn.Close() + var ok bool + udpconn, ok = pconn.(*net.UDPConn) + if !ok { + return errors.New("not a UDP connection") + } + } else { + udpconn, err = net.ListenUDP("udp", &addrUDP) + if err != nil { + return err + } + defer udpconn.Close() + } + + payload := make([]byte, 9000) + + localIP := addrUDP.IP.String() + if addrUDP.IP == nil { + localIP = "" + } + + for { + size, pktAddr, _ := udpconn.ReadFromUDP(payload) + payloadCut := make([]byte, size) + copy(payloadCut, payload[0:size]) + + baseMessage := BaseMessage{ + Src: pktAddr.IP, + Port: pktAddr.Port, + Payload: payloadCut, + } + processor.ProcessMessage(baseMessage) + + MetricTrafficBytes.With( + prometheus.Labels{ + "remote_ip": pktAddr.IP.String(), + "remote_port": strconv.Itoa(pktAddr.Port), + "local_ip": localIP, + "local_port": strconv.Itoa(addrUDP.Port), + "type": name, + }). + Add(float64(size)) + MetricTrafficPackets.With( + prometheus.Labels{ + "remote_ip": pktAddr.IP.String(), + "remote_port": strconv.Itoa(pktAddr.Port), + "local_ip": localIP, + "local_port": strconv.Itoa(addrUDP.Port), + "type": name, + }). + Inc() + MetricPacketSizeSum.With( + prometheus.Labels{ + "remote_ip": pktAddr.IP.String(), + "remote_port": strconv.Itoa(pktAddr.Port), + "local_ip": localIP, + "local_port": strconv.Itoa(addrUDP.Port), + "type": name, + }). 
+ Observe(float64(size)) + } +} From 1de649b8316feabb5654168e3a5a4a2bfa85d663 Mon Sep 17 00:00:00 2001 From: Louis Poinsignon Date: Tue, 6 Aug 2019 17:54:05 -0700 Subject: [PATCH 2/2] Travis integration --- .travis.yml | 57 +++++++++++++++++------- Dockerfile | 4 +- Makefile | 88 ++++++++++++++++++++++++++++---------- cmd/cnetflow/cnetflow.go | 3 +- cmd/cnflegacy/cnflegacy.go | 3 +- cmd/csflow/csflow.go | 3 +- cmd/goflow/goflow.go | 3 +- docker-compose-pkg.yml | 11 +++++ package/Dockerfile | 9 ++++ package/goflow.env | 1 + package/goflow.service | 11 +++++ 11 files changed, 150 insertions(+), 43 deletions(-) create mode 100644 docker-compose-pkg.yml create mode 100644 package/Dockerfile create mode 100644 package/goflow.env create mode 100644 package/goflow.service diff --git a/.travis.yml b/.travis.yml index 6db423e..c0cacff 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,15 +1,42 @@ -language: go -go_import_path: github.com/cloudflare/goflow -go: - - 1.12.x - -script: - - GO111MODULE=on make - -notifications: - email: - recipients: - - louis@cloudflare.com - on_success: never - on_failure: change - +jobs: + include: + # Test + - stage: test + os: linux + language: go + env: + GO111MODULE=on + script: + - make test-race vet test + # Compile + - stage: compile + os: linux + language: go + env: + GO111MODULE=on + BUILDINFOSDET=-travis + before_install: + - sudo apt-get update + - sudo apt-get install -y rpm ruby ruby-dev + - sudo gem install fpm + script: + - GOOS=linux make build-goflow-light + - GOOS=linux make build-goflow + - GOOS=darwin make build-goflow + - GOOS=windows EXTENSION=.exe make build-goflow + - make package-deb-goflow package-rpm-goflow + deploy: + provider: releases + api_key: + secure: 
eg1OSNzXVSVsCx/n7xSJAtAw7NlgtnK57EyJmrwGgvcs5OUm5cvsnK3isuWwsAFanW6b69UoyyZDayIj72poiTVGo5705lL1sN39LxypmlkpmOFJaMggIdbPAN4fB6anRHp+MBGMvxGjeJP/97JKnPXcyK+QevqxRl2sMFRjLthTBManET7ahAhD5HqsdT/MeFORCymlJ+sIRXkLHrtBdiW/KXLLzsKn3C4/OPP3Z08ggqDix7I3zLaHW7nAvug3h5V5I84FiedEgO+w7McMjX8ri2Fz/sXNz3AaQIgBUxkmnIEvv4b9nFkd3HjIHRyS6iPpcdrqGXcMqW2SVHOJ668t140MLKrZyoCj4yi0UzqjY5F6iBCy5GSz8TBbz1Mo7TF6ieVeAaC0WZImO1aRHQeBNY/5NjvmwCXLDq7sUyxcHbfSa39/Pn6sD5yZkNsSEpTJ9AHxo2/os4NxQJ6l4nV/vseNDUnhcLf3irCBpsv1k1q6EgAO4kCdELSDMaYasZm2p4U9PDiGP1tyxWoglQKzma0sR1FGnOpUQB1Wl6ZWeW4IotHLb6QQRLfERPueWgENi2etDs88lLY1EuCamFoY19nWXROCiUEYFthK6csapgQw7y4hIcup2/gB0eNVoWbGB16MYQD2W47gj6LUGDSQMAjXffymugde71R46JQ= + file_glob: true + file: dist/* + skip_cleanup: true + on: + tags: true + repo: cloudflare/goflow + - dist: trusty + services: + - docker + script: + - make docker-goflow diff --git a/Dockerfile b/Dockerfile index e1718b8..bf7a03f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,12 +1,12 @@ FROM golang:alpine as builder -ARG VERSION="" +ARG LDFLAGS="" RUN apk --update --no-cache add git build-base gcc COPY . 
/build WORKDIR /build -RUN go build -ldflags "-X main.version=${VERSION}" -o goflow cmd/goflow/goflow.go +RUN go build -ldflags "${LDFLAGS}" -o goflow cmd/goflow/goflow.go FROM alpine:latest ARG src_dir diff --git a/Makefile b/Makefile index 984b28f..5166b99 100644 --- a/Makefile +++ b/Makefile @@ -1,31 +1,28 @@ -IMAGE ?= cloudflare/goflow -VERSION ?= $(shell git describe --tags --always --dirty) -VERSION_DOCKER ?= $(shell git describe --tags --abbrev=0 --always --dirty) - +EXTENSION ?= +DIST_DIR ?= dist/ GOOS ?= linux ARCH ?= $(shell uname -m) +BUILDINFOSDET ?= -.PHONY: all -all: test-race vet test +DOCKER_REPO := cloudflare/ +GOFLOW_NAME := goflow +GOFLOW_VERSION := $(shell git describe --tags $(shell git rev-list --tags --max-count=1)) +VERSION_PKG := $(shell echo $(GOFLOW_VERSION) | sed 's/^v//g') +ARCH := x86_64 +LICENSE := BSD-3 +URL := https://github.com/cloudflare/goflow +DESCRIPTION := GoFlow: an sFlow/IPFIX/NetFlow v9/v5 collector to Kafka +BUILDINFOS := ($(shell date +%FT%T%z)$(BUILDINFOSDET)) +LDFLAGS := '-X main.version=$(GOFLOW_VERSION) -X main.buildinfos=$(BUILDINFOS)' -.PHONY: clean -clean: - rm -rf bin +OUTPUT_GOFLOW := $(DIST_DIR)goflow-$(GOFLOW_VERSION)-$(GOOS)-$(ARCH)$(EXTENSION) -.PHONY: build -build: - @echo compiling code - mkdir bin - GOOS=$(GOOS) go build -ldflags '-X main.version=$(VERSION)' -o bin/goflow-$(GOOS)-$(ARCH) cmd/goflow/goflow.go - GOOS=$(GOOS) go build -ldflags '-X main.version=$(VERSION)' -o bin/goflow-sflow-$(GOOS)-$(ARCH) cmd/csflow/csflow.go - GOOS=$(GOOS) go build -ldflags '-X main.version=$(VERSION)' -o bin/goflow-netflow-$(GOOS)-$(ARCH) cmd/cnetflow/cnetflow.go - GOOS=$(GOOS) go build -ldflags '-X main.version=$(VERSION)' -o bin/goflow-nflegacy-$(GOOS)-$(ARCH) cmd/cnflegacy/cnflegacy.go +OUTPUT_GOFLOW_LIGHT_SFLOW := $(DIST_DIR)goflow-sflow-$(GOFLOW_VERSION)-$(GOOS)-$(ARCH)$(EXTENSION) +OUTPUT_GOFLOW_LIGHT_NF := $(DIST_DIR)goflow-netflow-$(GOFLOW_VERSION)-$(GOOS)-$(ARCH)$(EXTENSION) +OUTPUT_GOFLOW_LIGHT_NFV5 := 
$(DIST_DIR)goflow-nflegacy-$(GOFLOW_VERSION)-$(GOOS)-$(ARCH)$(EXTENSION) - -.PHONY: container -container: - @echo build docker container - docker build --build-arg VERSION=$(VERSION) -t $(IMAGE):$(VERSION_DOCKER) . +.PHONY: all +all: test-race vet test .PHONY: proto proto: @@ -46,3 +43,50 @@ vet: test-race: @echo testing code for races go test -race ./... + +.PHONY: prepare +prepare: + mkdir -p $(DIST_DIR) + +.PHONY: clean +clean: + rm -rf $(DIST_DIR) + +.PHONY: build-goflow +build-goflow: prepare + go build -ldflags $(LDFLAGS) -o $(OUTPUT_GOFLOW) cmd/goflow/goflow.go + +.PHONY: build-goflow-light +build-goflow-light: prepare + go build -ldflags $(LDFLAGS) -o $(OUTPUT_GOFLOW_LIGHT_SFLOW) cmd/csflow/csflow.go + go build -ldflags $(LDFLAGS) -o $(OUTPUT_GOFLOW_LIGHT_NF) cmd/cnetflow/cnetflow.go + go build -ldflags $(LDFLAGS) -o $(OUTPUT_GOFLOW_LIGHT_NFV5) cmd/cnflegacy/cnflegacy.go + +.PHONY: docker-goflow +docker-goflow: + docker build -t $(DOCKER_REPO)$(GOFLOW_NAME):$(GOFLOW_VERSION) --build-arg LDFLAGS=$(LDFLAGS) -f Dockerfile . 
+ +.PHONY: package-deb-goflow +package-deb-goflow: prepare + fpm -s dir -t deb -n $(GOFLOW_NAME) -v $(VERSION_PKG) \ + --description "$(DESCRIPTION)" \ + --url "$(URL)" \ + --architecture $(ARCH) \ + --license "$(LICENSE)" \ + --deb-no-default-config-files \ + --package $(DIST_DIR) \ + $(OUTPUT_GOFLOW)=/usr/bin/goflow \ + package/goflow.service=/lib/systemd/system/goflow.service \ + package/goflow.env=/etc/default/goflow + +.PHONY: package-rpm-goflow +package-rpm-goflow: prepare + fpm -s dir -t rpm -n $(GOFLOW_NAME) -v $(VERSION_PKG) \ + --description "$(DESCRIPTION)" \ + --url "$(URL)" \ + --architecture $(ARCH) \ + --license "$(LICENSE) "\ + --package $(DIST_DIR) \ + $(OUTPUT_GOFLOW)=/usr/bin/goflow \ + package/goflow.service=/lib/systemd/system/goflow.service \ + package/goflow.env=/etc/default/goflow diff --git a/cmd/cnetflow/cnetflow.go b/cmd/cnetflow/cnetflow.go index c2ef8cb..7ce8a48 100644 --- a/cmd/cnetflow/cnetflow.go +++ b/cmd/cnetflow/cnetflow.go @@ -15,7 +15,8 @@ import ( var ( version = "" - AppVersion = "GoFlow NetFlow " + version + buildinfos = "" + AppVersion = "GoFlow NetFlow " + version + " " + buildinfos Addr = flag.String("addr", "", "NetFlow/IPFIX listening address") Port = flag.Int("port", 2055, "NetFlow/IPFIX listening port") diff --git a/cmd/cnflegacy/cnflegacy.go b/cmd/cnflegacy/cnflegacy.go index 30f1d18..60ea52a 100644 --- a/cmd/cnflegacy/cnflegacy.go +++ b/cmd/cnflegacy/cnflegacy.go @@ -15,7 +15,8 @@ import ( var ( version = "" - AppVersion = "GoFlow NetFlowv5 " + version + buildinfos = "" + AppVersion = "GoFlow NetFlowV5 " + version + " " + buildinfos Addr = flag.String("addr", "", "NetFlow v5 listening address") Port = flag.Int("port", 2055, "NetFlow v5 listening port") diff --git a/cmd/csflow/csflow.go b/cmd/csflow/csflow.go index bde0ad0..2b29808 100644 --- a/cmd/csflow/csflow.go +++ b/cmd/csflow/csflow.go @@ -15,7 +15,8 @@ import ( var ( version = "" - AppVersion = "GoFlow sFlow " + version + buildinfos = "" + AppVersion = "GoFlow 
sFlow " + version + " " + buildinfos Addr = flag.String("addr", "", "sFlow listening address") Port = flag.Int("port", 6343, "sFlow listening port") diff --git a/cmd/goflow/goflow.go b/cmd/goflow/goflow.go index aba776c..0f7ece6 100644 --- a/cmd/goflow/goflow.go +++ b/cmd/goflow/goflow.go @@ -17,7 +17,8 @@ import ( var ( version = "" - AppVersion = "GoFlow " + version + buildinfos = "" + AppVersion = "GoFlow " + version + " " + buildinfos SFlowEnable = flag.Bool("sflow", true, "Enable sFlow") SFlowAddr = flag.String("sflow.addr", "", "sFlow listening address") diff --git a/docker-compose-pkg.yml b/docker-compose-pkg.yml new file mode 100644 index 0000000..ab0788d --- /dev/null +++ b/docker-compose-pkg.yml @@ -0,0 +1,11 @@ +version: '3' +services: + packager: + build: package + entrypoint: make + command: + - build-goflow + - package-deb-goflow + - package-rpm-goflow + volumes: + - ./:/work/ \ No newline at end of file diff --git a/package/Dockerfile b/package/Dockerfile new file mode 100644 index 0000000..1b42a3f --- /dev/null +++ b/package/Dockerfile @@ -0,0 +1,9 @@ +FROM ruby + +RUN apt-get update && \ + apt-get install -y git make rpm golang && \ + gem install fpm + +WORKDIR /work + +ENTRYPOINT [ "/bin/bash" ] \ No newline at end of file diff --git a/package/goflow.env b/package/goflow.env new file mode 100644 index 0000000..256fca3 --- /dev/null +++ b/package/goflow.env @@ -0,0 +1 @@ +GOFLOW_ARGS= \ No newline at end of file diff --git a/package/goflow.service b/package/goflow.service new file mode 100644 index 0000000..0d55bbc --- /dev/null +++ b/package/goflow.service @@ -0,0 +1,11 @@ +[Unit] +Description=GoFlow +After=network.target + +[Service] +Type=simple +EnvironmentFile=/etc/default/goflow +ExecStart=/usr/bin/goflow $GOFLOW_ARGS + +[Install] +WantedBy=multi-user.target \ No newline at end of file