Fix pylint warning unspecified-encoding #7222
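This PR addresses pylint's W1514 (unspecified-encoding) warning: when open() is used in text mode without an explicit encoding, Python falls back to locale.getpreferredencoding(False), so the same code can decode a file as UTF-8 on Linux but as cp1252 on Windows. A minimal sketch of the before/after pattern applied throughout this diff, using a hypothetical file name:

import json

# Flagged by pylint W1514: the decoding depends on the host locale.
with open("report.json") as f:
    data = json.load(f)

# Fixed: the file is decoded as UTF-8 regardless of platform defaults.
with open("report.json", encoding="utf-8") as f:
    data = json.load(f)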

Open · wants to merge 2 commits into master
2 changes: 1 addition & 1 deletion autogpt/agbenchmark_config/analyze_reports.py
@@ -36,7 +36,7 @@

# Loop through each JSON file to collect suffixes and success values
for report_file in sorted(report_files):
with open(report_file) as f:
with open(report_file, encoding='utf-8') as f:
logger.info(f"Loading {report_file}...")

data = json.load(f)
2 changes: 1 addition & 1 deletion benchmark/agbenchmark/__main__.py
@@ -143,7 +143,7 @@ def run(
exit_code = None

if backend:
with open("backend/backend_stdout.txt", "w") as f:
with open("backend/backend_stdout.txt", "w", encoding="utf-8") as f:
sys.stdout = f
exit_code = run_benchmark(
config=agbenchmark_config,
3 changes: 2 additions & 1 deletion benchmark/agbenchmark/app.py
@@ -56,7 +56,8 @@
except ValidationError as e:
if logging.getLogger().level == logging.DEBUG:
logger.warning(f"Spec file {challenge_relpath} failed to load:\n{e}")
logger.debug(f"Invalid challenge spec: {challenge_spec_file.read_text()}")
with open(challenge_spec_file, 'r', encoding='utf-8') as f:
logger.debug(f"Invalid challenge spec: {f.read_text()}")
continue
challenge_info.spec_file = challenge_spec_file

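Note on the hunk above: read_text() is a pathlib.Path convenience method, while an open file handle exposes read(), which is why the new code reads the spec through f.read() rather than f.read_text(). A small sketch of the distinction, with a hypothetical path:

from pathlib import Path

spec = Path("challenge_spec.json")

# Path.read_text() opens, reads, and closes the file in one call.
contents = spec.read_text(encoding="utf-8")

# An already-open handle exposes read(); it has no read_text() method.
with open(spec, encoding="utf-8") as f:
    contents = f.read()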
2 changes: 1 addition & 1 deletion benchmark/agbenchmark/challenges/__init__.py
@@ -34,7 +34,7 @@ def get_unique_categories() -> set[str]:
glob_path = f"{challenges_dir}/**/data.json"

for data_file in glob.glob(glob_path, recursive=True):
with open(data_file, "r") as f:
with open(data_file, "r", encoding="utf-8") as f:
try:
challenge_data = json.load(f)
categories.update(challenge_data.get("category", []))
42 changes: 21 additions & 21 deletions benchmark/agbenchmark/challenges/builtin.py
@@ -32,7 +32,7 @@

logger = logging.getLogger(__name__)

with open(Path(__file__).parent / "optional_categories.json") as f:
with open(Path(__file__).parent / "optional_categories.json", encoding="utf-8") as f:
OPTIONAL_CATEGORIES: list[str] = json.load(f)["optional_categories"]


@@ -307,30 +307,30 @@ def get_outputs_for_eval(
# Otherwise, it is a specific file
matching_files = [os.path.join(script_dir, file_pattern)]

logger.debug(
f"Files to evaluate for pattern `{file_pattern}`: {matching_files}"
)

for file_path in matching_files:
relative_file_path = Path(file_path).relative_to(workspace)
logger.debug(
f"Evaluating {relative_file_path} "
f"(eval type: {ground.eval.type})..."
)
if ground.eval.type == "python":
result = subprocess.run(
[sys.executable, file_path],
cwd=os.path.abspath(workspace),
capture_output=True,
text=True,
)
if "error" in result.stderr or result.returncode != 0:
yield relative_file_path, f"Error: {result.stderr}\n"
else:
yield relative_file_path, f"Output: {result.stdout}\n"
else:
with open(file_path, "r") as f:
with open(file_path, "r", encoding="utf-8") as f:
yield relative_file_path, f.read()
else:
if ground.eval.type == "pytest":
result = subprocess.run(
2 changes: 1 addition & 1 deletion benchmark/reports/json_to_base_64.py
@@ -2,7 +2,7 @@
import json

# Load JSON data from a file
with open("secrets.json", "r") as f:
with open("secrets.json", "r", encoding="utf-8") as f:
data = json.load(f)

# Convert the JSON object into a string
4 changes: 2 additions & 2 deletions benchmark/reports/match_records.py
@@ -95,8 +95,8 @@ def get_reports():
for report_file in report_files:
# Check if the report.json file exists
if os.path.isfile(report_file):
# Open the report.json file
with open(report_file, "r") as f:
# Open the report.json file with UTF-8 encoding
with open(report_file, "r", encoding="utf-8") as f:
# Load the JSON data from the file
json_data = json.load(f)
print(f"Processing {report_file}")
2 changes: 1 addition & 1 deletion benchmark/reports/send_to_googledrive.py
@@ -118,7 +118,7 @@ def process_test(

if os.path.exists(report_path):
# Load the JSON data from the file
with open(report_path, "r") as f:
with open(report_path, "r", encoding="utf-8") as f:
data = json.load(f)
benchmark_start_time = data.get("benchmark_start_time", "")

6 changes: 3 additions & 3 deletions cli.py
@@ -296,7 +296,7 @@ def benchmark_categories_list():
# Use it as the base for the glob pattern, excluding 'deprecated' directory
for data_file in glob.glob(glob_path, recursive=True):
if "deprecated" not in data_file:
with open(data_file, "r") as f:
with open(data_file, "r", encoding="utf-8") as f:
try:
data = json.load(f)
categories.update(data.get("category", []))
@@ -340,7 +340,7 @@ def benchmark_tests_list():
# Use it as the base for the glob pattern, excluding 'deprecated' directory
for data_file in glob.glob(glob_path, recursive=True):
if "deprecated" not in data_file:
with open(data_file, "r") as f:
with open(data_file, "r", encoding="utf-8") as f:
try:
data = json.load(f)
category = data.get("category", [])
@@ -389,7 +389,7 @@ def benchmark_tests_details(test_name):
)
# Use it as the base for the glob pattern, excluding 'deprecated' directory
for data_file in glob.glob(glob_path, recursive=True):
with open(data_file, "r") as f:
with open(data_file, "r", encoding="utf-8") as f:
try:
data = json.load(f)
if data.get("name") == test_name:
5 changes: 4 additions & 1 deletion forge/forge/file_storage/local.py
@@ -77,7 +77,10 @@ def _open_file(self, path: str | Path, mode: str) -> TextIO | BinaryIO:
full_path = self.get_path(path)
if any(m in mode for m in ("w", "a", "x")):
full_path.parent.mkdir(parents=True, exist_ok=True)
return open(full_path, mode) # type: ignore
if 'b' in mode:
return open(full_path, mode)
else:
return open(full_path, mode, encoding='utf-8')

@overload
def read_file(self, path: str | Path, binary: Literal[False] = False) -> str:
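The mode check added in _open_file above is needed because open() only accepts an encoding argument in text mode; combining it with a binary mode raises ValueError before the file is even opened. A quick sketch of the behavior the branch guards against, with a hypothetical file name:

# Binary mode yields raw bytes and takes no encoding argument.
with open("blob.bin", "wb") as f:
    f.write(b"\x00\x01")

# Passing encoding together with "b" raises:
# ValueError: binary mode doesn't take an encoding argument
try:
    open("blob.bin", "rb", encoding="utf-8")
except ValueError as e:
    print(e)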
2 changes: 1 addition & 1 deletion forge/forge/llm/providers/openai.py
@@ -260,7 +260,7 @@ def get_model_access_kwargs(self, model: str) -> dict[str, str]:
return kwargs

def load_azure_config(self, config_file: Path) -> None:
with open(config_file) as file:
with open(config_file, 'r', encoding='utf-8') as file:
config_params = yaml.load(file, Loader=yaml.SafeLoader) or {}

try: