Switch to Simple API #65

Closed · wants to merge 7 commits
6 changes: 3 additions & 3 deletions micropip/_compat.py
@@ -7,7 +7,7 @@
REPODATA_INFO,
REPODATA_PACKAGES,
fetch_bytes,
-fetch_string,
+fetch_string_and_headers,
get_dynlibs,
loadDynlib,
loadedPackages,
@@ -20,7 +20,7 @@
REPODATA_INFO,
REPODATA_PACKAGES,
fetch_bytes,
-fetch_string,
+fetch_string_and_headers,
get_dynlibs,
loadDynlib,
loadedPackages,
@@ -33,7 +33,7 @@
"REPODATA_INFO",
"REPODATA_PACKAGES",
"fetch_bytes",
"fetch_string",
"fetch_string_and_headers",
"loadedPackages",
"loadDynlib",
"loadPackage",
18 changes: 15 additions & 3 deletions micropip/_compat_in_pyodide.py
@@ -8,6 +8,7 @@

try:
import pyodide_js
+from js import Object
from pyodide_js import loadedPackages, loadPackage
from pyodide_js._api import loadBinaryFile, loadDynlib # type: ignore[import]

@@ -30,13 +31,24 @@ async def fetch_bytes(url: str, kwargs: dict[str, str]) -> IO[bytes]:
return BytesIO(result_bytes)


-async def fetch_string(url: str, kwargs: dict[str, str]) -> str:
-    return await (await pyfetch(url, **kwargs)).string()
+async def fetch_string_and_headers(
+    url: str, kwargs: dict[str, str]
+) -> tuple[str, dict[str, str]]:
+    # TODO: pyfetch needs a better way to get headers...
+    # (https://github.com/pyodide/pyodide/pull/2078)
+
+    response = await pyfetch(url, **kwargs)
+    headers: dict[str, str] = Object.fromEntries(
+        response.js_response.headers.entries()
+    ).to_py()
+    content = await response.string()
+
+    return content, headers


__all__ = [
"fetch_bytes",
"fetch_string",
"fetch_string_and_headers",
"REPODATA_INFO",
"REPODATA_PACKAGES",
"loadedPackages",
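For reference, a minimal sketch of calling the new Pyodide-side helper from the Pyodide console (assumes this branch of micropip is loaded in the browser, that top-level await is available, and that the URL shown is reachable; all of these are illustrative assumptions, not part of the PR):

from micropip._compat_in_pyodide import fetch_string_and_headers

# Fetch a Simple API project page and inspect the response headers.
content, headers = await fetch_string_and_headers("https://pypi.org/simple/pytest/", {})
# Header names coming back from the browser Fetch API are lower-cased,
# which is why the lookup below (and in _simpleapi) uses "content-type".
print(headers["content-type"], len(content))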
17 changes: 13 additions & 4 deletions micropip/_compat_not_in_pyodide.py
@@ -14,14 +14,23 @@ def to_py():


from urllib.request import Request, urlopen
+from urllib.response import addinfourl


+def _fetch(url: str, kwargs: dict[str, str]) -> addinfourl:
+    return urlopen(Request(url, headers=kwargs))


async def fetch_bytes(url: str, kwargs: dict[str, str]) -> IO[bytes]:
-    return BytesIO(urlopen(Request(url, headers=kwargs)).read())
+    response = _fetch(url, kwargs=kwargs)
+    return BytesIO(response.read())


-async def fetch_string(url: str, kwargs: dict[str, str]) -> str:
-    return (await fetch_bytes(url, kwargs)).read().decode()
+async def fetch_string_and_headers(
+    url: str, kwargs: dict[str, str]
+) -> tuple[str, dict[str, str]]:
+    response = _fetch(url, kwargs=kwargs)
+    return response.read().decode(), dict(response.headers)


async def loadDynlib(dynlib: str, is_shared_lib: bool) -> None:
@@ -108,7 +117,7 @@ def loadPackage(packages: str | list[str]) -> None:
__all__ = [
"loadDynlib",
"fetch_bytes",
"fetch_string",
"fetch_string_and_headers",
"REPODATA_INFO",
"REPODATA_PACKAGES",
"loadedPackages",
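Outside Pyodide the same name resolves to the urllib-based fallback above. A minimal usage sketch, assuming this branch of micropip is installed in a regular CPython environment (the package name and Accept header are illustrative):

import asyncio

from micropip._compat_not_in_pyodide import fetch_string_and_headers


async def main() -> None:
    # The kwargs dict is passed straight through as request headers by the fallback.
    content, headers = await fetch_string_and_headers(
        "https://pypi.org/simple/pytest/",
        {"Accept": "application/vnd.pypi.simple.v1+json, */*;q=0.01"},
    )
    # urllib preserves header-name casing, so normalise before looking it up.
    content_type = {k.lower(): v for k, v in headers.items()}.get("content-type")
    print(content_type, len(content))


asyncio.run(main())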
101 changes: 101 additions & 0 deletions micropip/_simpleapi.py
@@ -0,0 +1,101 @@
from ._compat import fetch_string_and_headers
from .constants import DEFAULT_INDEX_URL
from .externals.mousebender import simple


def _parse_project_details(
    content: str, content_type: str, project_name: str
) -> simple.ProjectDetails:
    """
    Parse a Simple API response and return a dict of project details.
    """
    return simple.parse_project_details(content, content_type, project_name)


def _get_content_type(headers: dict[str, str]) -> str:
    """
    Get the content-type header from the headers, and make sure it is a valid
    content type that the Simple API may return.

    Borrowed from: https://github.com/pypa/pip/blob/main/src/pip/_internal/index/collector.py
    """
    content_type = headers.get("content-type", "Unknown")

    content_type_l = content_type.lower()
    if content_type_l.startswith(
        (
            "text/html",
            "application/vnd.pypi.simple.v1+html",
            "application/vnd.pypi.simple.v1+json",
        )
    ):
        return content_type_l

    raise ValueError(f"Invalid content-type: {content_type}")


async def fetch_project_details(
    project_name: str,
    index_url: str | list[str] | None = None,
    fetch_kwargs: dict[str, str] | None = None,
) -> simple.ProjectDetails:
    """
    Fetch the project details from the given index URLs.

    Parameters
    ----------
    project_name:
        The name of the project to fetch details for.

    index_url:
        The index URL to fetch from. If None, the default index URL
        (https://pypi.org/simple/) is used. The index URL may be a string or a
        list of strings. If a list is given, the first URL that returns a valid
        response is used.

        The index URL must support the Simple Repository API (PEP 503, PEP 691).

    fetch_kwargs:
        Additional keyword arguments to pass to the fetch function.

    Returns
    -------
    The parsed project details, including the list of available project files.
    """
    if index_url is None:
        index_url = DEFAULT_INDEX_URL

    if isinstance(index_url, str):
        index_url = [index_url]

    # Prefer JSON, but fall back to HTML if necessary
    _fetch_kwargs = fetch_kwargs.copy() if fetch_kwargs else {}
    _fetch_kwargs.setdefault(
        "Accept", "application/vnd.pypi.simple.v1+json, */*;q=0.01"
    )

    for url in index_url:
        url = url.rstrip("/") + "/"
        try:
            project_url = url + project_name + "/"
            content, headers = await fetch_string_and_headers(
                project_url, _fetch_kwargs
            )
            content_type = _get_content_type(headers)
            break
        except OSError:
            continue
    else:  # no break
        raise ValueError(
            f"Can't fetch metadata for '{project_name}' from any index. "
            "Please make sure you have entered a correct package name."
        )

    try:
        details = _parse_project_details(content, content_type, project_name)
    except simple.UnsupportedMIMEType:
        raise ValueError(
            f"Invalid content type '{content_type}' for '{project_name}' from index '{url}'. "
        ) from None

    return details
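A rough end-to-end usage sketch for the new module, assuming this branch of micropip is importable outside the browser and that mousebender's ProjectDetails mapping exposes "name" and "files" keys with per-file "filename" entries (the package name is illustrative):

import asyncio

from micropip._simpleapi import fetch_project_details


async def main() -> None:
    # No index_url given, so DEFAULT_INDEX_URL (https://pypi.org/simple/) is used.
    details = await fetch_project_details("snowballstemmer")
    print(details["name"])
    for file in details["files"][:5]:
        print(" ", file["filename"])


asyncio.run(main())

Because the default Accept header prefers application/vnd.pypi.simple.v1+json but still allows */*, indexes that only serve the HTML form of the Simple API (PEP 503) keep working: _get_content_type validates whichever content type comes back, and mousebender parses the matching format.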
2 changes: 2 additions & 0 deletions micropip/constants.py
@@ -1,3 +1,5 @@
FAQ_URLS = {
    "cant_find_wheel": "https://pyodide.org/en/stable/usage/faq.html#micropip-can-t-find-a-pure-python-wheel"
}
+
+DEFAULT_INDEX_URL = "https://pypi.org/simple/"