Skip to content

Commit d74f1a0

Browse files
Refactor OCI code to appease the lint god
Simplify _lint.yml. Rephrase OCI code base to pass make lint checks.
1 parent 9e65808 commit d74f1a0

17 files changed

+2772
-1895
lines changed

.github/workflows/_lint.yml

Lines changed: 5 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -42,17 +42,11 @@ jobs:
4242
working-directory: ${{ inputs.working-directory }}
4343
cache-key: lint-with-extras
4444

45-
- name: Check Poetry File
46-
shell: bash
45+
- name: Check Poetry configuration
4746
working-directory: ${{ inputs.working-directory }}
4847
run: |
4948
poetry check
50-
51-
- name: Check lock file
52-
shell: bash
53-
working-directory: ${{ inputs.working-directory }}
54-
run: |
55-
poetry lock --check
49+
poetry check --lock
5650
5751
- name: Install dependencies
5852
# Also installs dev/lint/test/typing dependencies, to ensure we have
@@ -65,38 +59,9 @@ jobs:
6559
# It doesn't matter how you change it, any change will cause a cache-bust.
6660
working-directory: ${{ inputs.working-directory }}
6761
run: |
68-
poetry install --with lint,typing
69-
70-
- name: Get .mypy_cache to speed up mypy
71-
uses: actions/cache@v4
72-
env:
73-
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "2"
74-
with:
75-
path: |
76-
${{ env.WORKDIR }}/.mypy_cache
77-
key: mypy-lint-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
78-
79-
80-
- name: Analysing the code with our lint
81-
working-directory: ${{ inputs.working-directory }}
82-
run: |
83-
make lint_package
84-
85-
- name: Install unit+integration test dependencies
86-
working-directory: ${{ inputs.working-directory }}
87-
run: |
88-
poetry install --with test,test_integration
89-
90-
- name: Get .mypy_cache_test to speed up mypy
91-
uses: actions/cache@v4
92-
env:
93-
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "2"
94-
with:
95-
path: |
96-
${{ env.WORKDIR }}/.mypy_cache_test
97-
key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', inputs.working-directory)) }}
62+
poetry install --with lint,typing,test,test_integration
9863
99-
- name: Analysing the code with our lint
64+
- name: Run linting
10065
working-directory: ${{ inputs.working-directory }}
10166
run: |
102-
make lint_tests
67+
make lint

libs/oci/Makefile

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -27,14 +27,14 @@ lint_tests: PYTHON_FILES=tests
2727
lint_tests: MYPY_CACHE=.mypy_cache_test
2828

2929
lint lint_diff lint_package lint_tests:
30-
poetry run ruff .
31-
poetry run ruff format $(PYTHON_FILES) --diff
32-
poetry run ruff --select I $(PYTHON_FILES)
30+
poetry run ruff check .
31+
poetry run ruff format $(PYTHON_FILES) --check --diff
32+
poetry run ruff check --select I $(PYTHON_FILES)
3333
mkdir -p $(MYPY_CACHE); poetry run mypy $(PYTHON_FILES) --cache-dir $(MYPY_CACHE)
3434

3535
format format_diff:
3636
poetry run ruff format $(PYTHON_FILES)
37-
poetry run ruff --select I --fix $(PYTHON_FILES)
37+
poetry run ruff check --select I --fix $(PYTHON_FILES)
3838

3939
spell_check:
4040
poetry run codespell --toml pyproject.toml

libs/oci/langchain_oci/chat_models/oci_data_science.py

Lines changed: 11 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -275,8 +275,7 @@ def validate_openai(cls, values: Any) -> Any:
275275
"""Checks if langchain_openai is installed."""
276276
if not importlib.util.find_spec("langchain_openai"):
277277
raise ImportError(
278-
"Could not import langchain_openai package. "
279-
"Please install it with `pip install langchain_openai`."
278+
"Could not import langchain_openai package. Please install it with `pip install langchain_openai`."
280279
)
281280
return values
282281

@@ -303,9 +302,7 @@ def _default_params(self) -> Dict[str, Any]:
303302
"stream": self.streaming,
304303
}
305304

306-
def _headers(
307-
self, is_async: Optional[bool] = False, body: Optional[dict] = None
308-
) -> Dict:
305+
def _headers(self, is_async: Optional[bool] = False, body: Optional[dict] = None) -> Dict:
309306
"""Construct and return the headers for a request.
310307
311308
Args:
@@ -357,17 +354,13 @@ def _generate(
357354
response = chat.invoke(messages)
358355
""" # noqa: E501
359356
if self.streaming:
360-
stream_iter = self._stream(
361-
messages, stop=stop, run_manager=run_manager, **kwargs
362-
)
357+
stream_iter = self._stream(messages, stop=stop, run_manager=run_manager, **kwargs)
363358
return generate_from_stream(stream_iter)
364359

365360
requests_kwargs = kwargs.pop("requests_kwargs", {})
366361
params = self._invocation_params(stop, **kwargs)
367362
body = self._construct_json_body(messages, params)
368-
res = self.completion_with_retry(
369-
data=body, run_manager=run_manager, **requests_kwargs
370-
)
363+
res = self.completion_with_retry(data=body, run_manager=run_manager, **requests_kwargs)
371364
return self._process_response(res.json())
372365

373366
def _stream(
@@ -415,9 +408,7 @@ def _stream(
415408
params = self._invocation_params(stop, **kwargs)
416409
body = self._construct_json_body(messages, params) # request json body
417410

418-
response = self.completion_with_retry(
419-
data=body, run_manager=run_manager, stream=True, **requests_kwargs
420-
)
411+
response = self.completion_with_retry(data=body, run_manager=run_manager, stream=True, **requests_kwargs)
421412
default_chunk_class = AIMessageChunk
422413
for line in self._parse_stream(response.iter_lines()):
423414
chunk = self._handle_sse_line(line, default_chunk_class)
@@ -467,9 +458,7 @@ async def _agenerate(
467458
468459
""" # noqa: E501
469460
if self.streaming:
470-
stream_iter = self._astream(
471-
messages, stop=stop, run_manager=run_manager, **kwargs
472-
)
461+
stream_iter = self._astream(messages, stop=stop, run_manager=run_manager, **kwargs)
473462
return await agenerate_from_stream(stream_iter)
474463

475464
requests_kwargs = kwargs.pop("requests_kwargs", {})
@@ -593,19 +582,14 @@ def with_structured_output(
593582
else JsonOutputParser()
594583
)
595584
else:
596-
raise ValueError(
597-
f"Unrecognized method argument. Expected `json_mode`."
598-
f"Received: `{method}`."
599-
)
585+
raise ValueError(f"Unrecognized method argument. Expected `json_mode`. Received: `{method}`.")
600586

601587
if include_raw:
602588
parser_assign = RunnablePassthrough.assign(
603589
parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
604590
)
605591
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
606-
parser_with_fallback = parser_assign.with_fallbacks(
607-
[parser_none], exception_key="parsing_error"
608-
)
592+
parser_with_fallback = parser_assign.with_fallbacks([parser_none], exception_key="parsing_error")
609593
return RunnableMap(raw=llm) | parser_with_fallback
610594
else:
611595
return llm | output_parser
@@ -688,9 +672,7 @@ def _process_stream_response(
688672
if not isinstance(choice, dict):
689673
raise TypeError("Endpoint response is not well formed.")
690674
except (KeyError, IndexError, TypeError) as e:
691-
raise ValueError(
692-
"Error while formatting response payload for chat model of type"
693-
) from e
675+
raise ValueError("Error while formatting response payload for chat model of type") from e
694676

695677
chunk = _convert_delta_to_message_chunk(choice["delta"], default_chunk_cls)
696678
default_chunk_cls = chunk.__class__
@@ -702,9 +684,7 @@ def _process_stream_response(
702684
if usage is not None:
703685
gen_info.update({"usage": usage})
704686

705-
return ChatGenerationChunk(
706-
message=chunk, generation_info=gen_info if gen_info else None
707-
)
687+
return ChatGenerationChunk(message=chunk, generation_info=gen_info if gen_info else None)
708688

709689
def _process_response(self, response_json: dict) -> ChatResult:
710690
"""Formats response in OpenAI spec.
@@ -729,9 +709,7 @@ def _process_response(self, response_json: dict) -> ChatResult:
729709
if not isinstance(choices, list):
730710
raise TypeError("Endpoint response is not well formed.")
731711
except (KeyError, TypeError) as e:
732-
raise ValueError(
733-
"Error while formatting response payload for chat model of type"
734-
) from e
712+
raise ValueError("Error while formatting response payload for chat model of type") from e
735713

736714
for choice in choices:
737715
message = _convert_dict_to_message(choice["message"])

0 commit comments

Comments
 (0)