Skip to content

Commit 19aa034

Browse files
Make optional arguments keyword-only arguments (#3740)
### Changes Use keyword-only arguments to preserve backward compatibility when function signatures change. ### Related tickets 132510 ### Tests examples - https://github.com/openvinotoolkit/nncf/actions/runs/19545748204 wc - https://github.com/openvinotoolkit/nncf/actions/runs/19538526758
1 parent f1bb26b commit 19aa034

File tree

8 files changed

+28
-15
lines changed

8 files changed

+28
-15
lines changed

src/nncf/common/strip.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@
2828
@tracked_function(category=MODEL_BASED_CATEGORY, extractors=[FunctionCallTelemetryExtractor("nncf.strip")])
2929
def strip(
3030
model: TModel,
31+
*,
3132
do_copy: bool = True,
3233
strip_format: StripFormat = StripFormat.NATIVE,
3334
example_input: Optional[Any] = None,

src/nncf/pruning/prune_model.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,8 @@
2424

2525
def prune(
2626
model: TModel,
27-
*,
2827
mode: PruneMode,
28+
*,
2929
ratio: Optional[float] = None,
3030
ignored_scope: Optional[IgnoredScope] = None,
3131
examples_inputs: Optional[Any] = None,

src/nncf/quantization/quantize_model.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -129,6 +129,7 @@ def _update_advanced_quantization_parameters(
129129
def quantize(
130130
model: TModel,
131131
calibration_dataset: Dataset,
132+
*,
132133
mode: Optional[QuantizationMode] = None,
133134
preset: Optional[QuantizationPreset] = None,
134135
target_device: TargetDevice = TargetDevice.ANY,
@@ -282,6 +283,7 @@ def quantize_with_accuracy_control(
282283
calibration_dataset: Dataset,
283284
validation_dataset: Dataset,
284285
validation_fn: Callable[[Any, Iterable[Any]], tuple[float, Union[None, list[float], list[list[TTensor]]]]],
286+
*,
285287
max_drop: float = 0.01,
286288
drop_type: DropType = DropType.ABSOLUTE,
287289
preset: Optional[QuantizationPreset] = None,
@@ -407,14 +409,14 @@ def quantize_with_accuracy_control(
407409
)
408410
def compress_weights(
409411
model: TModel,
412+
*,
410413
mode: CompressWeightsMode = CompressWeightsMode.INT8_ASYM,
411414
ratio: Optional[float] = None,
412415
group_size: Optional[int] = None,
413416
ignored_scope: Optional[IgnoredScope] = None,
414417
all_layers: Optional[bool] = None,
415418
dataset: Optional[Dataset] = None,
416419
sensitivity_metric: Optional[SensitivityMetric] = None,
417-
*,
418420
subset_size: int = 128,
419421
awq: Optional[bool] = None,
420422
scale_estimation: Optional[bool] = None,

tests/onnx/quantization/test_weights_compression.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -121,7 +121,7 @@ def calculate_numbers_of_quantized_weights(model: onnx.ModelProto) -> WeightType
121121
)
122122
def test_numbers_of_quantized_weights(mode, reference_counter):
123123
model = create_model()
124-
model = compress_weights(model, mode)
124+
model = compress_weights(model, mode=mode)
125125
counter = calculate_numbers_of_quantized_weights(model)
126126
assert counter == reference_counter
127127

@@ -133,7 +133,7 @@ def test_numbers_of_quantized_weights(mode, reference_counter):
133133
def test_correct_dequantizelinear_int8(mode_weight_type):
134134
mode, expected_weight_type = mode_weight_type
135135
model = create_model()
136-
model = compress_weights(model, mode)
136+
model = compress_weights(model, mode=mode)
137137

138138
dq_cnt = 0
139139
for node in model.graph.node:
@@ -164,7 +164,7 @@ def test_correct_dequantizelinear_int8(mode_weight_type):
164164
def test_correct_dequantizelinear_uint8(mode_weight_type):
165165
mode, expected_weight_type = mode_weight_type
166166
model = create_model()
167-
model = compress_weights(model, mode)
167+
model = compress_weights(model, mode=mode)
168168

169169
dq_cnt = 0
170170
for node in model.graph.node:
@@ -204,7 +204,7 @@ def test_correct_dequantizelinear_uint8(mode_weight_type):
204204
def test_correct_dequantizelinear_int4(mode_weight_type, group_size):
205205
mode, expected_weight_type = mode_weight_type
206206
model = create_model()
207-
model = compress_weights(model, mode, group_size=group_size, all_layers=True)
207+
model = compress_weights(model, mode=mode, group_size=group_size, all_layers=True)
208208

209209
dq_cnt = 0
210210
for node in model.graph.node:
@@ -240,7 +240,7 @@ def test_correct_dequantizelinear_int4(mode_weight_type, group_size):
240240
def test_correct_dequantizelinear_uint4(mode_weight_type, group_size):
241241
mode, expected_weight_type = mode_weight_type
242242
model = create_model()
243-
model = compress_weights(model, mode, group_size=group_size, all_layers=True)
243+
model = compress_weights(model, mode=mode, group_size=group_size, all_layers=True)
244244

245245
dq_cnt = 0
246246
for node in model.graph.node:
@@ -281,7 +281,7 @@ def test_correct_dequantizelinear_uint4(mode_weight_type, group_size):
281281
)
282282
def test_compression_with_inference(mode):
283283
model = create_model()
284-
model = compress_weights(model, mode)
284+
model = compress_weights(model, mode=mode)
285285
onnx.checker.check_model(model)
286286
input_data = np.random.rand(100, 1280).astype(np.float32)
287287
session = InferenceSession(model.SerializeToString())

tests/openvino/native/quantization/test_weights_compression.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -611,7 +611,7 @@ def test_shared_gather(mode):
611611
"matmul_1_data": ov.Type.i4 if mode == CompressWeightsMode.INT4_SYM else ov.Type.u4,
612612
}
613613
model = GatherAndMatmulShareData().ov_model
614-
compressed_model = compress_weights(model, mode, group_size=3)
614+
compressed_model = compress_weights(model, mode=mode, group_size=3)
615615
for op in compressed_model.get_ordered_ops():
616616
op_name = op.get_friendly_name()
617617
if op.get_type_name() == "Constant" and op_name in weight_name_vs_type:
@@ -626,7 +626,7 @@ def test_shared_gather_all_layers(all_layers):
626626
"matmul_1_data": ov.Type.u4,
627627
}
628628
model = GatherAndMatmulShareData().ov_model
629-
compressed_model = compress_weights(model, CompressWeightsMode.INT4_ASYM, group_size=-1, all_layers=all_layers)
629+
compressed_model = compress_weights(model, mode=CompressWeightsMode.INT4_ASYM, group_size=-1, all_layers=all_layers)
630630
for op in compressed_model.get_ordered_ops():
631631
op_name = op.get_friendly_name()
632632
if op.get_type_name() == "Constant" and op_name in weight_name_vs_type:

tests/openvino/optimized_functions/test_compression_functions.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -389,7 +389,7 @@ def get_input_node_data(node: ov.Node, input_id: int) -> Tensor:
389389
if is_data_aware:
390390
compression_kwargs["dataset"] = create_dataset(model)
391391

392-
nncf.compress_weights(model, config.mode, group_size=config.group_size, **compression_kwargs)
392+
nncf.compress_weights(model, mode=config.mode, group_size=config.group_size, **compression_kwargs)
393393

394394
if cb == ComputationBackend.NumPy:
395395
mock.assert_not_called()

tests/torch/quantization/test_strip.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -316,9 +316,9 @@ def test_nncf_strip_api(strip_type, do_copy):
316316
quantized_model, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
317317

318318
if strip_type == "nncf":
319-
strip_model = nncf.strip(quantized_model, do_copy)
319+
strip_model = nncf.strip(quantized_model, do_copy=do_copy)
320320
elif strip_type == "torch":
321-
strip_model = nncf.torch.strip(quantized_model, do_copy)
321+
strip_model = nncf.torch.strip(quantized_model, do_copy=do_copy)
322322
elif strip_type == "nncf_interfere":
323323
strip_model = quantized_model.nncf.strip(do_copy)
324324

tests/torch2/function_hook/quantization/strip/test_strip_native.py

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,9 +32,19 @@ def test_nncf_strip_api(strip_type: str, do_copy: bool):
3232
quantized_model = nncf.quantize(model, nncf.Dataset([torch.ones(model.INPUT_SIZE)]), subset_size=1)
3333

3434
if strip_type == "nncf":
35-
strip_model = nncf.strip(quantized_model, do_copy, nncf.StripFormat.NATIVE, example_input)
35+
strip_model = nncf.strip(
36+
quantized_model,
37+
do_copy=do_copy,
38+
strip_format=nncf.StripFormat.NATIVE,
39+
example_input=example_input,
40+
)
3641
elif strip_type == "torch":
37-
strip_model = nncf.torch.strip(quantized_model, do_copy, nncf.StripFormat.NATIVE, example_input)
42+
strip_model = nncf.torch.strip(
43+
quantized_model,
44+
do_copy=do_copy,
45+
strip_format=nncf.StripFormat.NATIVE,
46+
example_input=example_input,
47+
)
3848

3949
if do_copy:
4050
assert id(strip_model) != id(quantized_model)

0 commit comments

Comments
 (0)