static USAGE: &str = r#"
Filters CSV data by whether the given regex set matches a row.
Unlike the search operation, this allows regex matching of multiple regexes
in a single pass.
The regexset-file is a plain text file with multiple regexes, with a regex on
each line.
The regex set is applied to each field in each row, and if any field matches,
then the row is written to the output, and the number of matches to stderr.
The columns to search can be limited with the '--select' flag (but the full row
is still written to the output if there is a match).
Returns exitcode 0 when matches are found, returning number of matches to stderr.
Returns exitcode 1 when no match is found, unless the '--not-one' flag is used.
When --quick is enabled, no output is produced and exitcode 0 is returned on
the first match.
For examples, see https://github.com/dathere/qsv/blob/master/tests/test_searchset.rs.
Usage:
qsv searchset [options] (<regexset-file>) [<input>]
qsv searchset --help
searchset arguments:
<regexset-file> The file containing regular expressions to match, with a
regular expression on each line.
See https://docs.rs/regex/latest/regex/index.html#syntax
or https://regex101.com with the Rust flavor for regex syntax.
<input> The CSV file to read. If not given, reads from stdin.
searchset options:
-i, --ignore-case Case insensitive search. This is equivalent to
prefixing the regex with '(?i)'.
--literal Treat the regex as a literal string. This allows
you to search for exact matches that even contain
regex special characters.
-s, --select <arg> Select the columns to search. See 'qsv select -h'
for the full syntax.
-v, --invert-match Select only rows that did not match
-u, --unicode Enable unicode support. When enabled, character classes
will match all unicode word characters instead of only
ASCII word characters. Decreases performance.
-f, --flag <column> If given, the command will not filter rows
but will instead flag the found rows in a new
column named <column>. For each found row, <column>
is set to the row number of the row, followed by a
semicolon, then a list of the matching regexes.
--flag-matches-only When --flag is enabled, only rows that match are
sent to output. Rows that do not match are filtered.
--unmatched-output <file> When --flag-matches-only is enabled, output the rows
that did not match to <file>.
-q, --quick Return on first match with an exitcode of 0, returning
the row number of the first match to stderr.
Return exit code 1 if no match is found.
No output is produced. Ignored if --json is enabled.
-c, --count Return number of matches to stderr.
Ignored if --json is enabled.
-j, --json Return number of matches, number of rows with matches,
and number of rows to stderr in JSON format.
--size-limit <mb> Set the approximate size limit (MB) of the compiled
regular expression. If the compiled expression exceeds this
number, then a compilation error is returned.
Modify this only if you're getting regular expression
compilation errors. [default: 50]
--dfa-size-limit <mb> Set the approximate size of the cache (MB) used by the regular
expression engine's Discrete Finite Automata.
Modify this only if you're getting regular expression
compilation errors. [default: 10]
--not-one Use exit code 0 instead of 1 for no match found.
Common options:
-h, --help Display this message
-o, --output <file> Write output to <file> instead of stdout.
-n, --no-headers When set, the first row will not be interpreted
as headers. (i.e., They are not searched, analyzed,
sliced, etc.)
-d, --delimiter <arg> The field delimiter for reading CSV data.
Must be a single character. (default: ,)
-p, --progressbar Show progress bars. Not valid for stdin.
-Q, --quiet Do not return number of matches to stderr.
"#;
use std::{
    fs::File,
    io::{self, BufRead, BufReader},
};

#[cfg(any(feature = "feature_capable", feature = "lite"))]
use indicatif::{HumanCount, ProgressBar, ProgressDrawTarget};
use log::{debug, info};
use regex::{bytes::RegexSetBuilder, Regex};
use serde::Deserialize;
use serde_json::json;

use crate::{
    config::{Config, Delimiter},
    select::SelectColumns,
    util, CliError, CliResult,
};

#[allow(dead_code)]
#[derive(Deserialize)]
struct Args {
    arg_input: Option<String>,
    arg_regexset_file: String,
    flag_literal: bool,
    flag_select: SelectColumns,
    flag_output: Option<String>,
    flag_no_headers: bool,
    flag_delimiter: Option<Delimiter>,
    flag_invert_match: bool,
    flag_unicode: bool,
    flag_ignore_case: bool,
    flag_flag: Option<String>,
    flag_flag_matches_only: bool,
    flag_unmatched_output: Option<String>,
    flag_size_limit: usize,
    flag_dfa_size_limit: usize,
    flag_quick: bool,
    flag_count: bool,
    flag_json: bool,
    flag_not_one: bool,
    flag_progressbar: bool,
    flag_quiet: bool,
}
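
/// Reads the regexset file into a vector of patterns, one pattern per line.
/// When `literal` is true (the --literal flag), each line is passed through
/// regex::escape() so regex metacharacters are matched verbatim
/// (e.g. a line like `$19.99` becomes `\$19\.99`).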
fn read_regexset(filename: &str, literal: bool) -> io::Result<Vec<String>> {
    let file = File::open(filename)?;
    let reader = BufReader::new(file);
    let lines = reader.lines();
    if literal {
        lines.map(|line| line.map(|s| regex::escape(&s))).collect()
    } else {
        lines.collect()
    }
}

pub fn run(argv: &[&str]) -> CliResult<()> {
    let args: Args = util::get_args(USAGE, argv)?;
    let flag_not_one = args.flag_not_one;

    if args.flag_flag.is_none() && args.flag_flag_matches_only {
        return fail_incorrectusage_clierror!("Cannot use --flag-matches-only without --flag",);
    }
    if !args.flag_flag_matches_only && args.flag_unmatched_output.is_some() {
        return fail_incorrectusage_clierror!(
            "Cannot use --unmatched-output without --flag-matches-only",
        );
    }

    let regexset = read_regexset(&args.arg_regexset_file, args.flag_literal)?;

    let mut regex_labels: Vec<String> = Vec::with_capacity(regexset.len());
    let labels_re = Regex::new(r".?#(?P<label>.*)$").unwrap();
    // use regex comment labels if they exist, so matches are easier to understand
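    // a pattern line may carry a trailing '#label'; the text after '#' becomes the
    // label reported for that regex in the --flag column. Patterns without a label
    // fall back to their 1-based position in the regexset file.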
    for (i, regex) in regexset.iter().enumerate() {
        let label = labels_re
            .captures(regex)
            .and_then(|cap| cap.name("label"))
            .map_or_else(|| (i + 1).to_string(), |m| m.as_str().to_string());
        regex_labels.push(label);
    }

    let regex_unicode = if util::get_envvar_flag("QSV_REGEX_UNICODE") {
        true
    } else {
        args.flag_unicode
    };

    debug!("Compiling {} regex set expressions...", regexset.len());
    let pattern = RegexSetBuilder::new(&regexset)
        .case_insensitive(args.flag_ignore_case)
        .unicode(regex_unicode)
        .size_limit(args.flag_size_limit * (1 << 20))
        .dfa_size_limit(args.flag_dfa_size_limit * (1 << 20))
        .build()?;
    debug!("Successfully compiled regex set!");

    let rconfig = Config::new(args.arg_input.as_ref())
        .delimiter(args.flag_delimiter)
        .no_headers(args.flag_no_headers)
        .select(args.flag_select);

    let mut rdr = rconfig.reader()?;
    let mut wtr = Config::new(args.flag_output.as_ref()).writer()?;
    let mut unmatched_wtr = Config::new(args.flag_unmatched_output.as_ref()).writer()?;

    let mut headers = rdr.byte_headers()?.clone();
    let sel = rconfig.selection(&headers)?;
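
    // --flag mode: note that is_some_and has a side effect here - it appends the flag
    // column name to the headers. do_match_list is true only when --flag was given.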
    let do_match_list = args.flag_flag.is_some_and(|column_name| {
        headers.push_field(column_name.as_bytes());
        true
    });

    if !rconfig.no_headers && !args.flag_quick {
        wtr.write_record(&headers)?;
    }

    let record_count = util::count_rows(&rconfig)?;

    // prep progress bar
    #[cfg(any(feature = "feature_capable", feature = "lite"))]
    let show_progress =
        (args.flag_progressbar || util::get_envvar_flag("QSV_PROGRESSBAR")) && !rconfig.is_stdin();
    #[cfg(any(feature = "feature_capable", feature = "lite"))]
    let progress = ProgressBar::with_draw_target(None, ProgressDrawTarget::stderr_with_hz(5));
    #[cfg(any(feature = "feature_capable", feature = "lite"))]
    if show_progress {
        util::prep_progress(&progress, record_count);
    } else {
        progress.set_draw_target(ProgressDrawTarget::hidden());
    }

    let mut record = csv::ByteRecord::new();
    let mut flag_rowi: u64 = 1;
    let mut match_row_ctr: u64 = 0;
    let mut total_matches: u64 = 0;
    let mut row_ctr: u64 = 0;

    // minimize allocs
    #[allow(unused_assignments)]
    let mut flag_column: Vec<u8> = Vec::with_capacity(20);
    let mut match_list_vec = Vec::with_capacity(20);
    #[allow(unused_assignments)]
    let mut match_list = String::with_capacity(20);
    let mut matched_rows = String::with_capacity(20);
    #[allow(unused_assignments)]
    let mut match_list_with_row = String::with_capacity(20);
    let mut m;
    let mut matched = false;
    let mut matches: Vec<usize> = Vec::with_capacity(20);

    while rdr.read_byte_record(&mut record)? {
        row_ctr += 1;
        #[cfg(any(feature = "feature_capable", feature = "lite"))]
        if show_progress {
            progress.inc(1);
        }
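
        // check the selected fields against the compiled regex set. any() short-circuits
        // on the first field that matches; when --flag is set, the indices of the regexes
        // that matched that field are collected (shifted to 1-based) for the flag column.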
        m = sel.select(&record).any(|f| {
            matched = pattern.is_match(f);
            if matched && do_match_list {
                matches = pattern.matches(f).into_iter().collect();
                total_matches += matches.len() as u64;
                for j in &mut matches {
                    *j += 1; // so the list is human readable - i.e. not zero-based
                }
                match_list_vec.clone_from(&matches);
            }
            matched
        });
        if args.flag_invert_match {
            m = !m;
        }

        if m {
            match_row_ctr += 1;
            if args.flag_quick {
                break;
            }
        }
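
        // in --flag mode, build the flag column: for a matching row it is the row number,
        // a semicolon, and the comma-separated labels of the matching regexes (row number
        // only with --invert-match); rows that did not match get "0".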
        if do_match_list {
            flag_rowi += 1;
            flag_column = if m {
                itoa::Buffer::new()
                    .format(flag_rowi)
                    .clone_into(&mut matched_rows);
                if args.flag_invert_match {
                    matched_rows.as_bytes().to_vec()
                } else {
                    match_list = match_list_vec
                        .iter()
                        .map(|i| regex_labels[*i - 1].clone())
                        .collect::<Vec<String>>()
                        .join(",");
                    match_list_with_row = format!("{matched_rows};{match_list}");
                    match_list_with_row.as_bytes().to_vec()
                }
            } else {
                b"0".to_vec()
            };
            if args.flag_flag_matches_only && !m {
                if args.flag_unmatched_output.is_some() {
                    unmatched_wtr.write_byte_record(&record)?;
                }
                continue;
            }
            record.push_field(&flag_column);
            wtr.write_byte_record(&record)?;
        } else if m {
            wtr.write_byte_record(&record)?;
        }
    }

    unmatched_wtr.flush()?;
    wtr.flush()?;

    #[cfg(any(feature = "feature_capable", feature = "lite"))]
    if show_progress {
        if do_match_list {
            progress.set_message(format!(
                " - {} total matches in {} rows with matches found in {} records.",
                HumanCount(total_matches),
                HumanCount(match_row_ctr),
                HumanCount(record_count),
            ));
        } else {
            progress.set_message(format!(
                " - {} rows with matches found in {} records.",
                HumanCount(match_row_ctr),
                HumanCount(record_count),
            ));
        }
        util::finish_progress(&progress);
    }
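
    // reporting: --json writes a summary object to stderr; otherwise --count prints the
    // number of matching rows, and a run with no matches returns CliError::NoMatch
    // (exit code 1) unless --not-one was given.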
    if args.flag_json {
        let json = json!({
            "rows_with_matches": match_row_ctr,
            "total_matches": total_matches,
            "record_count": record_count,
        });
        eprintln!("{json}");
    } else {
        if args.flag_count && !args.flag_quick {
            if !args.flag_quiet {
                eprintln!("{match_row_ctr}");
            }
            info!("matches: {match_row_ctr}");
        }

        if match_row_ctr == 0 && !flag_not_one {
            return Err(CliError::NoMatch());
        } else if args.flag_quick {
            if !args.flag_quiet {
                eprintln!("{row_ctr}");
            }
            info!("quick searchset first match at {row_ctr}");
        }
    }

    Ok(())
}