@@ -41,22 +41,6 @@ const infoSchemaProcesslistQuery = `
41
41
GROUP BY user, host, command, state
42
42
`
43
43
44
- // Tunable flags.
45
- var (
46
- processlistMinTime = kingpin .Flag (
47
- "collect.info_schema.processlist.min_time" ,
48
- "Minimum time a thread must be in each state to be counted" ,
49
- ).Default ("0" ).Int ()
50
- processesByUserFlag = kingpin .Flag (
51
- "collect.info_schema.processlist.processes_by_user" ,
52
- "Enable collecting the number of processes by user" ,
53
- ).Default ("true" ).Bool ()
54
- processesByHostFlag = kingpin .Flag (
55
- "collect.info_schema.processlist.processes_by_host" ,
56
- "Enable collecting the number of processes by host" ,
57
- ).Default ("true" ).Bool ()
58
- )
59
-
60
44
// Metric descriptors.
61
45
var (
62
46
processlistCountDesc = prometheus .NewDesc (
78
62
)
79
63
80
64
// ScrapeProcesslist collects from `information_schema.processlist`.
//
// The fields replace the former package-level kingpin flag variables; they
// are populated by RegisterFlags so each scraper instance carries its own
// configuration instead of reading mutable package state.
type ScrapeProcesslist struct {
	// ProcessListMinTime is the minimum time (seconds, per the flag help
	// text — TODO confirm unit against the query) a thread must be in each
	// state to be counted. Flag: collect.info_schema.processlist.min_time.
	ProcessListMinTime int
	// ProcessesByUserFlag enables the per-user process-count metric.
	// Flag: collect.info_schema.processlist.processes_by_user.
	ProcessesByUserFlag bool
	// ProcessesByHostFlag enables the per-host process-count metric.
	// Flag: collect.info_schema.processlist.processes_by_host.
	ProcessesByHostFlag bool
}
82
70
83
71
// Name of the Scraper. Should be unique.
84
72
func (ScrapeProcesslist ) Name () string {
@@ -95,11 +83,27 @@ func (ScrapeProcesslist) Version() float64 {
95
83
return 5.1
96
84
}
97
85
86
+ // RegisterFlags adds flags to configure the Scraper.
87
+ func (s * ScrapeProcesslist ) RegisterFlags (application * kingpin.Application ) {
88
+ application .Flag (
89
+ "collect.info_schema.processlist.min_time" ,
90
+ "Minimum time a thread must be in each state to be counted" ,
91
+ ).Default ("0" ).IntVar (& s .ProcessListMinTime )
92
+ application .Flag (
93
+ "collect.info_schema.processlist.processes_by_user" ,
94
+ "Enable collecting the number of processes by user" ,
95
+ ).Default ("true" ).BoolVar (& s .ProcessesByUserFlag )
96
+ application .Flag (
97
+ "collect.info_schema.processlist.processes_by_host" ,
98
+ "Enable collecting the number of processes by host" ,
99
+ ).Default ("true" ).BoolVar (& s .ProcessesByHostFlag )
100
+ }
101
+
98
102
// Scrape collects data from database connection and sends it over channel as prometheus metric.
99
- func (ScrapeProcesslist ) Scrape (ctx context.Context , instance * instance , ch chan <- prometheus.Metric , logger * slog.Logger ) error {
103
+ func (s ScrapeProcesslist ) Scrape (ctx context.Context , instance * instance , ch chan <- prometheus.Metric , logger * slog.Logger ) error {
100
104
processQuery := fmt .Sprintf (
101
105
infoSchemaProcesslistQuery ,
102
- * processlistMinTime ,
106
+ s . ProcessListMinTime ,
103
107
)
104
108
db := instance .getDB ()
105
109
processlistRows , err := db .QueryContext (ctx , processQuery )
@@ -162,12 +166,13 @@ func (ScrapeProcesslist) Scrape(ctx context.Context, instance *instance, ch chan
162
166
}
163
167
}
164
168
165
- if * processesByHostFlag {
169
+ if s . ProcessesByHostFlag {
166
170
for _ , host := range sortedMapKeys (stateHostCounts ) {
167
171
ch <- prometheus .MustNewConstMetric (processesByHostDesc , prometheus .GaugeValue , float64 (stateHostCounts [host ]), host )
168
172
}
169
173
}
170
- if * processesByUserFlag {
174
+
175
+ if s .ProcessesByUserFlag {
171
176
for _ , user := range sortedMapKeys (stateUserCounts ) {
172
177
ch <- prometheus .MustNewConstMetric (processesByUserDesc , prometheus .GaugeValue , float64 (stateUserCounts [user ]), user )
173
178
}
0 commit comments