3 changes: 3 additions & 0 deletions cmd/diag/command/collect.go

@@ -157,6 +157,9 @@ func newCollectCmd() *cobra.Command {
     cmd.Flags().StringSliceVar(&ext, "exclude", nil, "types of data not to collect")
     cmd.Flags().StringSliceVar(&cOpt.MetricsFilter, "metricsfilter", nil, "prefix of metrics to collect")
     cmd.Flags().StringSliceVar(&cOpt.MetricsExclude, "metricsexclude", []string{"node_interrupts_total"}, "prefix of metrics to exclude")
+    cmd.Flags().StringSliceVar(&cOpt.MetricsLowPriority, "metrics-low-priority", []string{"tidb_tikvclient_request_seconds_bucket"},
+        "prefix of metrics to collect with low priority")
+    cmd.Flags().IntVar(&cOpt.MetricsMinInterval, "metrics-min-interval", 60, "the minimum interval of a single request in seconds")
     cmd.Flags().IntVar(&cOpt.MetricsLimit, "metricslimit", 10000, "metric size limit of single request, specified in series*hour per request")
     cmd.Flags().StringVar(&metricsConf, "metricsconfig", "", "config file of metricsfilter")
     cmd.Flags().StringSliceVar(&labels, "metricslabel", nil, "only collect metrics that match labels")

Review comment (Contributor Author), on the metrics-low-priority flag: it still needs to be decided whether pulling high-volume metrics should be separated from regular metrics by default.
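To make that question concrete, here is a minimal sketch of the two-phase idea, assuming a helper named splitByPriority (my name, not code from this PR) that partitions metric names against the --metrics-low-priority prefixes so bulky series are pulled only after everything else:

```go
package main

import (
	"fmt"
	"strings"
)

// splitByPriority partitions metric names into regular and low-priority
// groups; a metric is low-priority if it matches any of the given prefixes.
func splitByPriority(metrics, lowPriorityPrefixes []string) (regular, low []string) {
	for _, m := range metrics {
		isLow := false
		for _, p := range lowPriorityPrefixes {
			if strings.HasPrefix(m, p) {
				isLow = true
				break
			}
		}
		if isLow {
			low = append(low, m)
		} else {
			regular = append(regular, m)
		}
	}
	return regular, low
}

func main() {
	metrics := []string{
		"tidb_server_query_total",
		"tidb_tikvclient_request_seconds_bucket",
		"pd_scheduler_store_status",
	}
	regular, low := splitByPriority(metrics, []string{"tidb_tikvclient_request_seconds_bucket"})
	fmt.Println("pull first:", regular) // regular metrics go out immediately
	fmt.Println("pull last: ", low)     // high-volume metrics are deferred
}
```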
55 changes: 30 additions & 25 deletions collector/collect.go

@@ -108,28 +108,30 @@ type BaseOptions struct {
 
 // CollectOptions contains the options defining which type of data to collect
 type CollectOptions struct {
-    RawRequest      interface{}       // raw collect command or request
-    Mode            string            // the cluster is deployed with what type of tool
-    DiagMode        string            // run diag collect at command line mode or server mode
-    ProfileName     string            // the name of a pre-defined collecting profile
-    Collectors      CollectTree       // struct to show which collector is enabled
-    MetricsFilter   []string          // prefix of metrics to collect
-    MetricsExclude  []string          //prefix of metrics to exclude
-    MetricsLabel    map[string]string // label to filter metrics
-    Dir             string            // target directory to store collected data
-    Limit           int               // rate limit of SCP
-    MetricsLimit    int               // query limit of one request
-    PerfDuration    int               //seconds: profile time(s), default is 30s.
-    CompressScp     bool              // compress of files during collecting
-    CompressMetrics bool              // compress of files during collecting
-    RawMonitor      bool              // collect raw data for metrics
-    ExitOnError     bool              // break the process and exit when an error occurs
-    ExtendedAttrs   map[string]string // extended attributes used for manual collecting mode
-    ExplainSQLPath  string            // file path for explain SQL
-    ExplainSqls     []string          // explain SQLs
-    CurrDB          string
-    Header          []string
-    UsePortForward  bool              // use portforward when calling API inside k8s cluster
+    RawRequest         interface{}       // raw collect command or request
+    Mode               string            // the cluster is deployed with what type of tool
+    DiagMode           string            // run diag collect at command line mode or server mode
+    ProfileName        string            // the name of a pre-defined collecting profile
+    Collectors         CollectTree       // struct to show which collector is enabled
+    MetricsFilter      []string          // prefix of metrics to collect
+    MetricsExclude     []string          // prefix of metrics to exclude
+    MetricsLowPriority []string          // prefix of metrics to collect with low priority
+    MetricsLabel       map[string]string // label to filter metrics
+    Dir                string            // target directory to store collected data
+    Limit              int               // rate limit of SCP
+    MetricsLimit       int               // query limit of one request
+    MetricsMinInterval int               // minimum interval of a single query, default is 1min
+    PerfDuration       int               // seconds: profile time(s), default is 30s.
+    CompressScp        bool              // compress of files during collecting
+    CompressMetrics    bool              // compress of files during collecting
+    RawMonitor         bool              // collect raw data for metrics
+    ExitOnError        bool              // break the process and exit when an error occurs
+    ExtendedAttrs      map[string]string // extended attributes used for manual collecting mode
+    ExplainSQLPath     string            // file path for explain SQL
+    ExplainSqls        []string          // explain SQLs
+    CurrDB             string
+    Header             []string
+    UsePortForward     bool              // use portforward when calling API inside k8s cluster
 }
 
 // CollectStat is estimated size stats of data to be collected
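For callers constructing these options programmatically rather than through the CLI, a hypothetical defaults helper (my sketch, not code from this PR; it assumes it lives in the same collector package so CollectOptions is in scope) would wire the two new fields alongside the existing ones:

```go
package collector

// defaultMetricOptions mirrors the CLI flag defaults registered in
// cmd/diag/command/collect.go above.
func defaultMetricOptions() *CollectOptions {
	return &CollectOptions{
		MetricsExclude:     []string{"node_interrupts_total"},
		MetricsLowPriority: []string{"tidb_tikvclient_request_seconds_bucket"},
		MetricsLimit:       10000, // series*hour per request
		MetricsMinInterval: 60,    // seconds; floor for a single query window
	}
}
```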

@@ -301,7 +303,9 @@ func (m *Manager) CollectClusterInfo(
         label:        cOpt.MetricsLabel,
         filter:       cOpt.MetricsFilter,
         exclude:      cOpt.MetricsExclude,
+        lowPriority:  cOpt.MetricsLowPriority,
         limit:        cOpt.MetricsLimit,
+        minInterval:  cOpt.MetricsMinInterval,
         compress:     cOpt.CompressMetrics,
         customHeader: cOpt.Header,
         portForward:  cOpt.UsePortForward,
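A plausible reading of how these two options interact (my interpretation; the PR's actual windowing code is not shown in this diff): the collector slices the requested time range into per-request windows sized by the series*hour limit, and metrics-min-interval puts a floor under the window so a very large series cannot degenerate into a flood of tiny queries.

```go
package main

import "fmt"

// windowSeconds derives a per-request query window from the series*hour
// budget and clamps it to the configured floor. Illustrative only.
func windowSeconds(limit, seriesCount, minInterval int) int {
	if seriesCount < 1 {
		seriesCount = 1
	}
	w := limit * 3600 / seriesCount // seconds one request may span
	if w < minInterval {
		w = minInterval // never issue a request smaller than the floor
	}
	return w
}

func main() {
	// A 10000 series*hour budget against a 1.2M-series histogram allows
	// only a 30s window; the 60s floor lifts it back up.
	fmt.Println(windowSeconds(10000, 1200000, 60)) // prints 60
}
```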

@@ -537,8 +541,9 @@ func (m *Manager) CollectClusterInfo(
     // run collectors
     collectErrs := make(map[string]error)
     for _, c := range collectors {
-        fmt.Printf("Collecting %s...\n", c.Desc())
-        m.logger.Infof("Collecting %s...\n", c.Desc())
+        timeNow := time.Now()
+        fmt.Printf("Collecting %s..., time:%v\n", c.Desc(), timeNow)
+        m.logger.Infof("Collecting %s..., time:%v\n", c.Desc(), timeNow)
         if err := c.Collect(m, cls); err != nil {
             if cOpt.ExitOnError {
                 return "", err

@@ -569,7 +574,7 @@
     }
     logStr := fmt.Sprintf("The collected data has been stored in %s. For more details, please refer to the log at %s/diag.log.", dir, dir)
     fmt.Println(logStr)
-    m.logger.Infof(logStr)
+    m.logger.Infof("%s", logStr)
     return resultDir, nil
 }
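The one-line logger change here is more than cosmetic: passing a runtime string as the format argument means any '%' in the collected directory path would be parsed as a formatting verb (and go vet's printf check flags such calls). A minimal illustration of the failure mode:

```go
package main

import "fmt"

func main() {
	dir := "/data/diag-100%full" // a path containing a literal '%'
	fmt.Printf(dir)              // bad: prints /data/diag-100%!f(MISSING)ull
	fmt.Println()
	fmt.Printf("%s\n", dir) // good: prints the path verbatim
}
```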

2 changes: 1 addition & 1 deletion collector/prom2influx.go

@@ -269,7 +269,7 @@ func buildPoints(
 func writeBatchPoints(client influx.Client, data promDump, opts *RebuildOptions) error {
     // build and write points
     var errr error
-    tl := utils.NewTokenLimiter(uint(opts.Concurrency))
+    tl := utils.NewTokenLimiter(opts.Concurrency)
     wg := sync.WaitGroup{}
     for _, series := range data.Data.Result {
         ptChan := buildPoints(series, opts)
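Dropping the uint() conversion suggests opts.Concurrency now already carries the type NewTokenLimiter expects. For readers unfamiliar with the pattern, here is a simplified token limiter of my own (not tiup's utils implementation) showing how such a limiter bounds the number of concurrent batch writers:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// TokenLimiter caps concurrency by handing out at most n tokens:
// Get blocks until a token is free, Put returns it.
type TokenLimiter struct{ tokens chan struct{} }

func NewTokenLimiter(n uint) *TokenLimiter {
	return &TokenLimiter{tokens: make(chan struct{}, n)}
}

func (t *TokenLimiter) Get() { t.tokens <- struct{}{} }
func (t *TokenLimiter) Put() { <-t.tokens }

func main() {
	tl := NewTokenLimiter(2) // at most 2 writers in flight
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		tl.Get() // blocks while 2 writers are already running
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			defer tl.Put()
			fmt.Println("writing batch", i)
			time.Sleep(10 * time.Millisecond)
		}(i)
	}
	wg.Wait()
}
```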