diff --git a/Plugins/BenchmarkTool/BenchmarkTool+Baselines.swift b/Plugins/BenchmarkTool/BenchmarkTool+Baselines.swift
index 2a1abb37..6a007bfb 100644
--- a/Plugins/BenchmarkTool/BenchmarkTool+Baselines.swift
+++ b/Plugins/BenchmarkTool/BenchmarkTool+Baselines.swift
@@ -84,10 +84,10 @@ struct BenchmarkBaseline: Codable {
         var metrics: BenchmarkResult
     }

-    init(baselineName: String, machine: BenchmarkMachine, results: [BenchmarkIdentifier: [BenchmarkResult]]) {
+    init(baselineName: String, machine: BenchmarkMachine, results: [BenchmarkIdentifier: Profile]) {
         self.baselineName = baselineName
         self.machine = machine
-        self.results = results
+        self.profiles = results
     }

     // @discardableResult
@@ -95,17 +95,29 @@ struct BenchmarkBaseline: Codable {
         if machine != otherBaseline.machine {
             print("Warning: Merging baselines from two different machine configurations")
         }
-        results.merge(otherBaseline.results) { first, _ in first }
+        profiles.merge(otherBaseline.profiles) { first, _ in first }
         return self
     }

     var baselineName: String
     var machine: BenchmarkMachine
-    var results: BenchmarkResultsByIdentifier
+    var profiles: [BenchmarkIdentifier: Profile]
+
+    /// Represents a particular execution of a specific benchmark
+    /// and its set of results.
+    struct Profile: Codable {
+        var benchmark: Benchmark
+        var results: [BenchmarkResult]
+
+        init(benchmark: Benchmark, results: [BenchmarkResult] = []) {
+            self.benchmark = benchmark
+            self.results = results
+        }
+    }

     var benchmarkIdentifiers: [BenchmarkIdentifier] {
-        Array(results.keys).sorted(by: { ($0.target, $0.name) < ($1.target, $1.name) })
+        Array(profiles.keys).sorted(by: { ($0.target, $0.name) < ($1.target, $1.name) })
     }

     var targets: [String] {
@@ -118,8 +130,8 @@ struct BenchmarkBaseline: Codable {

     var benchmarkMetrics: [BenchmarkMetric] {
         var results: [BenchmarkMetric] = []
-        self.results.forEach { _, resultVector in
-            resultVector.forEach {
+        self.profiles.forEach { _, profile in
+            profile.results.forEach {
                 results.append($0.metric)
             }
         }
@@ -129,8 +141,8 @@ struct BenchmarkBaseline: Codable {

     func resultEntriesMatching(_ closure: (BenchmarkIdentifier, BenchmarkResult) -> (Bool, String)) -> [ResultsEntry] {
         var results: [ResultsEntry] = []
-        self.results.forEach { identifier, resultVector in
-            resultVector.forEach {
+        self.profiles.forEach { identifier, profile in
+            profile.results.forEach {
                 let (include, description) = closure(identifier, $0)
                 if include {
                     results.append(ResultsEntry(description: description, metrics: $0))
@@ -143,8 +155,8 @@ struct BenchmarkBaseline: Codable {

     func metricsMatching(_ closure: (BenchmarkIdentifier, BenchmarkResult) -> Bool) -> [BenchmarkMetric] {
         var results: [BenchmarkMetric] = []
-        self.results.forEach { identifier, resultVector in
-            resultVector.forEach {
+        self.profiles.forEach { identifier, profile in
+            profile.results.forEach {
                 if closure(identifier, $0) {
                     results.append($0.metric)
                 }
@@ -156,8 +168,8 @@ struct BenchmarkBaseline: Codable {

     func resultsMatching(_ closure: (BenchmarkIdentifier, BenchmarkResult) -> Bool) -> [BenchmarkResult] {
         var results: [BenchmarkResult] = []
-        self.results.forEach { identifier, resultVector in
-            resultVector.forEach {
+        self.profiles.forEach { identifier, profile in
+            profile.results.forEach {
                 if closure(identifier, $0) {
                     results.append($0)
                 }
@@ -168,8 +180,8 @@ struct BenchmarkBaseline: Codable {
     }

     func resultsByTarget(_ target: String) -> [String: [BenchmarkResult]] {
-        let filteredResults = results.filter { $0.key.target == target }.sorted(by: { $0.key.name < $1.key.name })
-        let resultsPerTarget = Dictionary(uniqueKeysWithValues: filteredResults.map { key, value in (key.name, value) })
+        let filteredResults = profiles.filter { $0.key.target == target }.sorted(by: { $0.key.name < $1.key.name })
+        let resultsPerTarget = Dictionary(uniqueKeysWithValues: filteredResults.map { key, value in (key.name, value.results) })

         return resultsPerTarget
     }
@@ -425,10 +437,10 @@ extension BenchmarkBaseline: Equatable {
         var warningPrinted = false
         var allDeviationResults = BenchmarkResult.ThresholdDeviations()

-        for (lhsBenchmarkIdentifier, lhsBenchmarkResults) in lhs.results {
-            for lhsBenchmarkResult in lhsBenchmarkResults {
-                if let rhsResults = rhs.results.first(where: { $0.key == lhsBenchmarkIdentifier }) {
-                    if let rhsBenchmarkResult = rhsResults.value.first(where: { $0.metric == lhsBenchmarkResult.metric }) {
+        for (lhsBenchmarkIdentifier, lhsBenchmarkProfiles) in lhs.profiles {
+            for lhsBenchmarkResult in lhsBenchmarkProfiles.results {
+                if let rhsProfile = rhs.profiles.first(where: { $0.key == lhsBenchmarkIdentifier }) {
+                    if let rhsBenchmarkResult = rhsProfile.value.results.first(where: { $0.metric == lhsBenchmarkResult.metric }) {
                         let thresholds = thresholdsForBenchmarks(benchmarks,
                                                                  name: lhsBenchmarkIdentifier.name,
                                                                  target: lhsBenchmarkIdentifier.target,
@@ -462,8 +474,8 @@ extension BenchmarkBaseline: Equatable {
                                                    [BenchmarkMetric: BenchmarkThresholds.AbsoluteThreshold]]) -> BenchmarkResult.ThresholdDeviations {
        var allDeviationResults = BenchmarkResult.ThresholdDeviations()

-        for (lhsBenchmarkIdentifier, lhsBenchmarkResults) in results {
-            for lhsBenchmarkResult in lhsBenchmarkResults {
+        for (lhsBenchmarkIdentifier, lhsBenchmarkProfile) in profiles {
+            for lhsBenchmarkResult in lhsBenchmarkProfile.results {
                let thresholds = thresholdsForBenchmarks(benchmarks,
                                                         name: lhsBenchmarkIdentifier.name,
                                                         target: lhsBenchmarkIdentifier.target,
@@ -492,10 +504,10 @@ extension BenchmarkBaseline: Equatable {
             return false
         }

-        for (lhsBenchmarkIdentifier, lhsBenchmarkResults) in lhs.results {
-            for lhsBenchmarkResult in lhsBenchmarkResults {
-                if let rhsResults = rhs.results.first(where: { $0.key == lhsBenchmarkIdentifier }) {
-                    if let rhsBenchmarkResult = rhsResults.value.first(where: { $0.metric == lhsBenchmarkResult.metric }) {
+        for (lhsBenchmarkIdentifier, lhsBenchmarkProfile) in lhs.profiles {
+            for lhsBenchmarkResult in lhsBenchmarkProfile.results {
+                if let rhsProfile = rhs.profiles.first(where: { $0.key == lhsBenchmarkIdentifier }) {
+                    if let rhsBenchmarkResult = rhsProfile.value.results.first(where: { $0.metric == lhsBenchmarkResult.metric }) {
                         if lhsBenchmarkResult != rhsBenchmarkResult {
                             return false
                         }
diff --git a/Plugins/BenchmarkTool/BenchmarkTool+Export+InfluxCSVFormatter.swift b/Plugins/BenchmarkTool/BenchmarkTool+Export+InfluxCSVFormatter.swift
index e7852fad..a03d2d88 100644
--- a/Plugins/BenchmarkTool/BenchmarkTool+Export+InfluxCSVFormatter.swift
+++ b/Plugins/BenchmarkTool/BenchmarkTool+Export+InfluxCSVFormatter.swift
@@ -19,9 +19,16 @@ struct ExportableBenchmark: Codable {

 struct TestData: Codable {
     var test: String
+    var tags: [String: String]
+    var fields: [String: Field]
     var iterations: Int
     var warmupIterations: Int
     var data: [TestMetricData]
+
+    struct Field: Codable {
+        let type: String
+        let value: String
+    }
 }

 struct TestMetricData: Codable {
@@ -53,18 +60,30 @@ class InfluxCSVFormatter {
         let processors = machine.processors
         let memory = machine.memory

-        if header {
-            let dataTypeHeader = "#datatype tag,tag,tag,tag,tag,tag,tag,tag,tag,double,double,long,long,dateTime\n"
-            finalFileFormat.append(dataTypeHeader)
-            let headers = "measurement,hostName,processoryType,processors,memory,kernelVersion,metric,unit,test,value,test_average,iterations,warmup_iterations,time\n"
-            finalFileFormat.append(headers)
-        }
-
         for testData in exportableBenchmark.benchmarks {
+            let orderedTags = testData.tags.map({ (key: $0, value: $1) })
+            let orderedFields = testData.fields.map({ (key: $0, field: $1) })
+
+            let customHeaderDataTypes = String(repeating: "tag,", count: orderedTags.count) +
+                orderedFields.map({ "\($0.field.type)," }).joined()
+
+            let customHeaders = (orderedTags.map({ "\($0.key)," }) +
+                orderedFields.map({ "\($0.key)," })).joined()
+
+            if header {
+                let dataTypeHeader = "#datatype tag,tag,tag,tag,tag,tag,tag,tag,tag,\(customHeaderDataTypes)double,double,long,long,dateTime\n"
+                finalFileFormat.append(dataTypeHeader)
+                let headers = "measurement,hostName,processoryType,processors,memory,kernelVersion,metric,unit,test,\(customHeaders)value,test_average,iterations,warmup_iterations,time\n"
+                finalFileFormat.append(headers)
+            }
+
             let testName = testData.test
             let iterations = testData.iterations
             let warmup_iterations = testData.warmupIterations

+            let customTagValues = orderedTags.map({ "\($0.value)," }).joined()
+            let customFieldValues = orderedFields.map({ "\($0.field.value)," }).joined()
+
             for granularData in testData.data {
                 let metric = granularData.metric
                     .replacingOccurrences(of: " ", with: "")
@@ -73,10 +92,11 @@ class InfluxCSVFormatter {

                 for dataTableValue in granularData.metricsdata {
                     let time = ISO8601DateFormatter().string(from: Date())
-                    let dataLine = "\(exportableBenchmark.target),\(hostName),\(processorType),\(processors),\(memory),\(kernelVersion),\(metric),\(units),\(testName),\(dataTableValue),\(average),\(iterations),\(warmup_iterations),\(time)\n"
+                    let dataLine = "\(exportableBenchmark.target),\(hostName),\(processorType),\(processors),\(memory),\(kernelVersion),\(metric),\(units),\(testName),\(customTagValues)\(customFieldValues)\(dataTableValue),\(average),\(iterations),\(warmup_iterations),\(time)\n"
                     finalFileFormat.append(dataLine)
                 }
             }
+            finalFileFormat.append("\n")
         }

         return finalFileFormat
@@ -114,7 +134,7 @@ extension BenchmarkTool {
         baseline.targets.forEach { key in
             let exportStruct = saveExportableResults(BenchmarkBaseline(baselineName: baseline.baselineName,
                                                                        machine: benchmarkMachine(),
-                                                                       results: baseline.results),
+                                                                       results: baseline.profiles),
                                                      target: key)

             let formatter = InfluxCSVFormatter(exportableBenchmark: exportStruct)
@@ -128,14 +148,14 @@ extension BenchmarkTool {
     }

     func saveExportableResults(_ benchmarks: BenchmarkBaseline, target: String) -> ExportableBenchmark {
-        var keys = benchmarks.results.keys.sorted(by: { $0.name < $1.name })
+        var keys = benchmarks.profiles.keys.sorted(by: { $0.name < $1.name })
         var testList: [TestData] = []

         keys.removeAll(where: { $0.target != target })

         keys.forEach { test in
-            if let value = benchmarks.results[test] {
+            if let profile = benchmarks.profiles[test] {
                 var allResults: [BenchmarkResult] = []
-                value.forEach { result in
+                profile.results.forEach { result in
                     allResults.append(result)
                 }
@@ -161,9 +181,23 @@ extension BenchmarkTool {
                     iterations = results.statistics.measurementCount
                     warmupIterations = results.warmupIterations
                 }
+
+                let exportConfig = profile.benchmark.configuration.exportConfigurations?[.influx] as? InfluxExportConfiguration
+
+                var tags: [String: String] = [:]
+                var fields: [String: TestData.Field] = [:]
+                for (tag, value) in profile.benchmark.configuration.tags {
+                    if let field = exportConfig?.fields[tag] {
+                        fields[tag] = TestData.Field(type: field.rawValue, value: value)
+                    } else {
+                        tags[tag] = value
+                    }
+                }

                 testList.append(
                     TestData(test: cleanedTestName,
+                             tags: tags,
+                             fields: fields,
                              iterations: iterations,
                              warmupIterations: warmupIterations,
                              data: benchmarkResultData)
diff --git a/Plugins/BenchmarkTool/BenchmarkTool+Export.swift b/Plugins/BenchmarkTool/BenchmarkTool+Export.swift
index 7cce2778..9e3d693e 100644
--- a/Plugins/BenchmarkTool/BenchmarkTool+Export.swift
+++ b/Plugins/BenchmarkTool/BenchmarkTool+Export.swift
@@ -157,8 +157,8 @@ extension BenchmarkTool {
             try write(exportData: "\(convertToInflux(baseline))",
                       fileName: "\(baselineName).influx.csv")
         case .histogram:
-            try baseline.results.forEach { key, results in
-                try results.forEach { values in
+            try baseline.profiles.forEach { key, profile in
+                try profile.results.forEach { values in
                     let outputString = values.statistics.histogram
                     let description = values.metric.rawDescription
                     try write(exportData: "\(outputString)",
@@ -169,10 +169,10 @@ extension BenchmarkTool {
             try write(exportData: "\(convertToJMH(baseline))",
                       fileName: cleanupStringForShellSafety("\(baselineName).jmh.json"))
         case .histogramSamples:
-            try baseline.results.forEach { key, results in
+            try baseline.profiles.forEach { key, profile in
                 var outputString = ""

-                try results.forEach { values in
+                try profile.results.forEach { values in
                     let histogram = values.statistics.histogram

                     outputString += "\(values.metric.description) \(values.unitDescriptionPretty)\n"
@@ -189,10 +189,10 @@ extension BenchmarkTool {
                 }
             }
         case .histogramEncoded:
-            try baseline.results.forEach { key, results in
+            try baseline.profiles.forEach { key, profile in
                 let encoder = JSONEncoder()

-                try results.forEach { values in
+                try profile.results.forEach { values in
                     let histogram = values.statistics.histogram
                     let jsonData = try encoder.encode(histogram)
                     let description = values.metric.rawDescription
@@ -207,8 +207,8 @@ extension BenchmarkTool {
         case .histogramPercentiles:
             var outputString = ""

-            try baseline.results.forEach { key, results in
-                try results.forEach { values in
+            try baseline.profiles.forEach { key, profile in
+                try profile.results.forEach { values in
                     let histogram = values.statistics.histogram

                     outputString += "Percentile\t" + "\(values.metric.description) \(values.unitDescriptionPretty)\n"
@@ -224,12 +224,12 @@ extension BenchmarkTool {
                 }
             }
         case .metricP90AbsoluteThresholds:
-            try baseline.results.forEach { key, results in
+            try baseline.profiles.forEach { key, profile in
                 let jsonEncoder = JSONEncoder()
                 jsonEncoder.outputFormatting = [.prettyPrinted, .sortedKeys]

                 var outputResults: [String: BenchmarkThresholds.AbsoluteThreshold] = [:]
-                results.forEach { values in
+                profile.results.forEach { values in
                     outputResults[values.metric.rawDescription] = Int(values.statistics.histogram.valueAtPercentile(90.0))
                 }
diff --git a/Plugins/BenchmarkTool/BenchmarkTool+Operations.swift b/Plugins/BenchmarkTool/BenchmarkTool+Operations.swift
index cf923015..c8e79431 100644
--- a/Plugins/BenchmarkTool/BenchmarkTool+Operations.swift
+++ b/Plugins/BenchmarkTool/BenchmarkTool+Operations.swift
@@ -47,8 +47,8 @@ extension BenchmarkTool {
         }
     }

-    mutating func runBenchmark(target: String, benchmark: Benchmark) throws -> BenchmarkResults {
-        var benchmarkResults: BenchmarkResults = [:]
+    mutating func runBenchmark(target: String, benchmark: Benchmark) throws -> BenchmarkResultsByIdentifier {
+        var benchmarkResults: BenchmarkResultsByIdentifier = [:]
         try write(.run(benchmark: benchmark))

         outerloop: while true {
@@ -126,7 +126,7 @@ extension BenchmarkTool {
         }

         if benchmarks.isEmpty { // if we read from baseline and didn't run them, we put in some fake entries for the compare
-            currentBaseline.results.keys.forEach { baselineKey in
+            currentBaseline.profiles.keys.forEach { baselineKey in
                 if let benchmark: Benchmark = .init(baselineKey.name, closure:{_ in}) {
                     benchmark.target = baselineKey.target
                     benchmarks.append(benchmark)
@@ -230,7 +230,7 @@ extension BenchmarkTool {
            let baseline = benchmarkBaselines[0]
            if let baselineName = self.baseline.first {
                try baseline.targets.forEach { target in
-                    let results = baseline.results.filter { $0.key.target == target }
+                    let results = baseline.profiles.filter { $0.key.target == target }
                     let subset = BenchmarkBaseline(baselineName: baselineName,
                                                    machine: baseline.machine,
                                                    results: results)
@@ -257,7 +257,7 @@ extension BenchmarkTool {
         }

         if benchmarks.isEmpty { // if we read from baseline and didn't run them, we put in some fake entries for the compare
-            currentBaseline.results.keys.forEach { baselineKey in
+            currentBaseline.profiles.keys.forEach { baselineKey in
                 if let benchmark: Benchmark = .init(baselineKey.name, closure:{_ in}) {
                     benchmark.target = baselineKey.target
                     benchmarks.append(benchmark)
diff --git a/Plugins/BenchmarkTool/BenchmarkTool+PrettyPrinting.swift b/Plugins/BenchmarkTool/BenchmarkTool+PrettyPrinting.swift
index 616f1f97..62ac8b2a 100644
--- a/Plugins/BenchmarkTool/BenchmarkTool+PrettyPrinting.swift
+++ b/Plugins/BenchmarkTool/BenchmarkTool+PrettyPrinting.swift
@@ -215,15 +215,15 @@ extension BenchmarkTool {
         let baseBaselineName = currentBaseline.baselineName
         let comparisonBaselineName = baseline.baselineName

-        var keys = baseline.results.keys.sorted(by: { $0.name < $1.name })
+        var keys = baseline.profiles.keys.sorted(by: { $0.name < $1.name })

         keys.removeAll(where: { $0.target != target })

         var firstOutput = true

         keys.forEach { key in
-            if let value = baseline.results[key] {
-                guard let baselineComparison = currentBaseline.results[key] else {
+            if let profile = baseline.profiles[key] {
+                guard let baselineComparison = currentBaseline.profiles[key] else {
                     // print("No baseline to compare with for `\(key.target):\(key.name)`.")
                     return
                 }
@@ -242,9 +242,9 @@ extension BenchmarkTool {
                 printText("----------------------------------------------------------------------------------------------------------------------------")
                 print("")

-                value.forEach { currentResult in
+                profile.results.forEach { currentResult in
                     var result = currentResult
-                    if let base = baselineComparison.first(where: { $0.metric == result.metric }) {
+                    if let base = baselineComparison.results.first(where: { $0.metric == result.metric }) {
                         let hideResults = result.deviationsComparedWith(base, thresholds: result.thresholds ?? BenchmarkThresholds.none).regressions.isEmpty

                         // We hide the markdown results if they are better than baseline to cut down noise
diff --git a/Plugins/BenchmarkTool/BenchmarkTool.swift b/Plugins/BenchmarkTool/BenchmarkTool.swift
index 7cfc0146..5c8aca82 100644
--- a/Plugins/BenchmarkTool/BenchmarkTool.swift
+++ b/Plugins/BenchmarkTool/BenchmarkTool.swift
@@ -38,8 +38,6 @@ extension BaselineOperation: ExpressibleByArgument {}
 extension ThresholdsOperation: ExpressibleByArgument {}
 extension BenchmarkMetric: ExpressibleByArgument {}

-typealias BenchmarkResults = [BenchmarkIdentifier: [BenchmarkResult]]
-
 fileprivate var failedBenchmarkRuns = 0

 @main
@@ -295,7 +293,7 @@ struct BenchmarkTool: AsyncParsableCommand {
             "Running Benchmarks".printAsHeader()
         }

-        var benchmarkResults: BenchmarkResults = [:]
+        var benchmarkResults: [BenchmarkIdentifier: BenchmarkBaseline.Profile] = [:]

         benchmarks.sort { ($0.target, $0.name) < ($1.target, $1.name) }

@@ -309,8 +307,13 @@ struct BenchmarkTool: AsyncParsableCommand {
                         printChildRunError(error: result, benchmarkExecutablePath: benchmark.executablePath!)
                     }
                 }
-
-                benchmarkResults = benchmarkResults.merging(results) { _, new in new }
+
+                for result in results {
+                    benchmarkResults[result.key] = BenchmarkBaseline.Profile(
+                        benchmark: benchmark,
+                        results: result.value
+                    )
+                }
             }
         }

@@ -342,10 +345,10 @@ struct BenchmarkTool: AsyncParsableCommand {
     mutating func runChild(benchmarkPath: String,
                            benchmarkCommand: BenchmarkOperation,
                            benchmark: Benchmark? = nil,
-                           completion: ((Int32) -> Void)? = nil) throws -> BenchmarkResults {
+                           completion: ((Int32) -> Void)? = nil) throws -> BenchmarkResultsByIdentifier {
         var pid: pid_t = 0

-        var benchmarkResults: BenchmarkResults = [:]
+        var benchmarkResults: BenchmarkResultsByIdentifier = [:]
         let fromChild = try FileDescriptor.pipe()
         let toChild = try FileDescriptor.pipe()
         let path = FilePath(benchmarkPath)
diff --git a/Sources/Benchmark/Benchmark.swift b/Sources/Benchmark/Benchmark.swift
index 33d856d7..dda7a6c5 100644
--- a/Sources/Benchmark/Benchmark.swift
+++ b/Sources/Benchmark/Benchmark.swift
@@ -454,6 +454,8 @@ public extension Benchmark {
         public var skip = false
         /// Customized threshold tolerances for a given metric for the Benchmark used for checking for regressions/improvements/equality.
         public var thresholds: [BenchmarkMetric: BenchmarkThresholds]?
+        /// Benchmark specific configurations to be provided to the exporters
+        public var exportConfigurations: BenchmarkExportConfigurations?
         /// Optional per-benchmark specific setup done before warmup and all iterations
         public var setup: BenchmarkSetupHook?
         /// Optional per-benchmark specific teardown done after final run is done
@@ -470,6 +472,7 @@ public extension Benchmark {
                     skip: Bool = defaultConfiguration.skip,
                     thresholds: [BenchmarkMetric: BenchmarkThresholds]? = defaultConfiguration.thresholds,
+                    exportConfigurations: [BenchmarkExportConfigurationKey: any BenchmarkExportConfiguration] = [:],
                     setup: BenchmarkSetupHook? = nil,
                     teardown: BenchmarkTeardownHook? = nil) {
             self.metrics = metrics
@@ -482,6 +485,7 @@ public extension Benchmark {
             self.maxIterations = maxIterations
             self.skip = skip
             self.thresholds = thresholds
+            self.exportConfigurations = BenchmarkExportConfigurations(configs: exportConfigurations)
             self.setup = setup
             self.teardown = teardown
         }
@@ -497,6 +501,7 @@ public extension Benchmark {
             case maxDuration
             case maxIterations
             case thresholds
+            case exportConfigurations
         }
         // swiftlint:enable nesting
     }
diff --git a/Sources/Benchmark/BenchmarkExportConfigurations/BenchmarkExportConfiguration.swift b/Sources/Benchmark/BenchmarkExportConfigurations/BenchmarkExportConfiguration.swift
new file mode 100644
index 00000000..880bd272
--- /dev/null
+++ b/Sources/Benchmark/BenchmarkExportConfigurations/BenchmarkExportConfiguration.swift
@@ -0,0 +1,79 @@
+/// A configuration used or expected by a particular result exporter
+public protocol BenchmarkExportConfiguration: Codable {}
+
+public struct BenchmarkExportConfigurationKey: Hashable, Codable {
+    private let value: String
+}
+
+public extension BenchmarkExportConfigurationKey {
+    static var influx: Self { .init(value: #function) }
+}
+
+/// The set of export configurations for a particular benchmark
+public struct BenchmarkExportConfigurations: Codable {
+    let configs: [BenchmarkExportConfigurationKey: any BenchmarkExportConfiguration]
+
+    public init(configs: [BenchmarkExportConfigurationKey: any BenchmarkExportConfiguration]) {
+        self.configs = configs
+    }
+
+    public subscript(_ key: BenchmarkExportConfigurationKey) -> (any BenchmarkExportConfiguration)? {
+        configs[key]
+    }
+}
+
+extension BenchmarkExportConfigurations: ExpressibleByDictionaryLiteral {
+    public init(dictionaryLiteral elements: (BenchmarkExportConfigurationKey, any BenchmarkExportConfiguration)...) {
+        configs = Dictionary(elements, uniquingKeysWith: { $1 })
+    }
+}
+
+// N.B. We are clever with the codability implementation below
+// since the value type in `BenchmarkExportConfigurations` is
+// an existential type.
+// The key mechanism is `BenchmarkExportConfigurationKey.resolveConfigType`
+// that enables us to determine the appropriate concrete type to
+// attempt to decode based on the key names located in the
+// data container.
+
+extension BenchmarkExportConfigurationKey {
+    /// This is used to determine the concrete type to attempt
+    /// to decode for a particular ``BenchmarkExportConfigurationKey``
+    static func resolveConfigType(from key: Self) -> BenchmarkExportConfiguration.Type? {
+        switch key {
+        // Add a case here when adding a new exporter config
+        case .influx: InfluxExportConfiguration.self
+        default: nil
+        }
+    }
+}
+
+public extension BenchmarkExportConfigurations {
+    init(from decoder: any Decoder) throws {
+        let container = try decoder.container(keyedBy: BenchmarkExportConfigurationKey.self)
+        self.configs = try container.allKeys.reduce(
+            into: [BenchmarkExportConfigurationKey: any BenchmarkExportConfiguration]()
+        ) { configs, key in
+            if let configType = type(of: key).resolveConfigType(from: key) {
+                configs[key] = try container.decode(configType.self, forKey: key)
+            }
+        }
+    }
+
+    func encode(to encoder: any Encoder) throws {
+        var encoder = encoder.container(keyedBy: BenchmarkExportConfigurationKey.self)
+        for (key, config) in configs {
+            try encoder.encode(config, forKey: key)
+        }
+    }
+}
+
+extension BenchmarkExportConfigurationKey: CodingKey {
+    public var stringValue: String { value }
+
+    public init?(stringValue: String) { self.init(value: stringValue) }
+
+    public var intValue: Int? { nil }
+
+    public init?(intValue: Int) { nil }
+}
diff --git a/Sources/Benchmark/BenchmarkExportConfigurations/InfluxExportConfiguration.swift b/Sources/Benchmark/BenchmarkExportConfigurations/InfluxExportConfiguration.swift
new file mode 100644
index 00000000..25505f4a
--- /dev/null
+++ b/Sources/Benchmark/BenchmarkExportConfigurations/InfluxExportConfiguration.swift
@@ -0,0 +1,29 @@
+public struct InfluxExportConfiguration: BenchmarkExportConfiguration {
+    /// The set of benchmark tags to interpret as Influx fields.
+    /// The default is to treat benchmark tags as Influx tags.
+    public let fields: [String: InfluxDataType]
+
+    public enum InfluxDataType: String, Codable {
+        // References: https://docs.influxdata.com/influxdb/cloud/reference/syntax/annotated-csv/#data-types
+
+        case boolean
+        /// Unsigned 64-bit integer
+        case unsignedLong
+        /// Signed 64-bit integer
+        case long
+        /// IEEE-754 64-bit floating-point number
+        case double
+        /// UTF-8 encoded string
+        case string
+        /// Base64 encoded sequence of bytes as defined in RFC 4648
+        case base64Binary
+        /// Instant in time, may be followed with a colon : and a description of the format (number, RFC3339, RFC3339Nano)
+        case dateTime
+        /// Length of time represented as an unsigned 64-bit integer number of nanoseconds
+        case duration
+    }
+
+    public init(fields: [String: InfluxDataType]) {
+        self.fields = fields
+    }
+}
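
A minimal usage sketch of the new export-configuration API introduced above (illustrative, not part of the patch): the benchmark name, tag names, and tag values are hypothetical, and the `tags:` initializer argument is assumed to be the existing counterpart of the `configuration.tags` property that the Influx exporter reads; only `exportConfigurations:` is added by this diff. Declaring `.influx: InfluxExportConfiguration(fields: ...)` asks the Influx CSV exporter to emit the listed tags as typed field columns rather than tag columns.

// Hypothetical benchmark declaration showing the new exportConfigurations parameter.
import Benchmark

let benchmarks = {
    Benchmark("LoadTest",
              configuration: .init(
                  metrics: [.wallClock, .throughput],
                  tags: ["pipeline": "nightly", "requests": "10000"],
                  exportConfigurations: [
                      // "requests" is exported as a long-typed Influx field; other tags stay Influx tags.
                      .influx: InfluxExportConfiguration(fields: ["requests": .long])
                  ])) { benchmark in
        for _ in benchmark.scaledIterations {
            // workload under test
        }
    }
}

With a configuration like this, the annotated CSV header would be expected to gain a `pipeline` column typed `tag` and a `requests` column typed `long`, matching the `customHeaderDataTypes`/`customHeaders` handling in InfluxCSVFormatter.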