-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathncmapis.py
135 lines (121 loc) · 3.96 KB
/
ncmapis.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
import json
from urllib import parse
from typing import Any, Optional
import requests
from bs4 import BeautifulSoup as BS
from retry import retry
# Number of attempts each @retry-decorated scraper makes before giving up.
RETRY_TIME = 3
# Browser-like headers sent with every request (see get_str) — presumably
# needed so music.163.com serves the full page; verify against the site.
HEADERS = {
    "Host": "music.163.com",
    "Referer": "https://music.163.com/",
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
}
# Module-wide session so HTTP connections are reused across calls.
global_session = requests.Session()
def get_str(
    url: str,
    headers: Optional[dict[str, Any]] = None,
    session: Optional[requests.Session] = None,
    **kwargs,
) -> str:
    """Fetch *url* with a GET request and return the response body as text.

    Parameters:
        url: the URL to fetch.
        headers: request headers; when None, a copy of the module-level
            HEADERS is used.
        session: a requests.Session to reuse; when None, the module-level
            global_session is used.
        **kwargs: forwarded to session.get; a 10-second timeout is applied
            unless the caller supplies one.

    Raises:
        requests.HTTPError: on a non-2xx response (via raise_for_status).
    """
    # BUG FIX: the original only populated kwargs["headers"] when headers
    # was None, so a caller-supplied headers dict was silently dropped.
    # Forward explicit headers; fall back to a copy of the defaults.
    kwargs["headers"] = HEADERS.copy() if headers is None else headers
    kwargs.setdefault("timeout", 10)
    session = global_session if session is None else session
    with session.get(url, **kwargs) as resp:
        resp.raise_for_status()
        return resp.text
@retry(tries=RETRY_TIME, delay=0.5)
def get_info(mid: int) -> dict[str, Any]:
    """Scrape the song page for *mid* and return its basic metadata.

    The result has keys: title, artists, cover, mid, album, album_id.
    The album fields are None when the page shows no album section.
    """
    soup = BS(get_str(f"https://music.163.com/song?id={mid}"), "html.parser")
    descriptions = soup.find_all("p", class_="des s-fc4")
    info: dict[str, Any] = {
        "title": soup.find("em", class_="f-ff2").get_text(),
        "artists": [
            link.get_text()
            for link in descriptions[0].find_all("a", class_="s-fc7")
        ],
        "cover": soup.find("img", class_="j-img").attrs["data-src"],
        "mid": int(mid),
    }
    try:
        # The second description paragraph (when present) links to the album.
        album_link = descriptions[1].find("a", class_="s-fc7")
        info["album"] = album_link.get_text()
        info["album_id"] = int(album_link.attrs["href"].split("?id=")[-1])
    except IndexError:
        # No album paragraph on the page — use None placeholders.
        info["album"] = None
        info["album_id"] = None
    return info
@retry(tries=RETRY_TIME, delay=0.5)
def get_album(aid: int) -> dict:
    """Scrape the album page for *aid*.

    Returns a dict with the track list (music_list), the album id (aid),
    the album title and its artists.
    """
    page = BS(get_str(f"https://music.163.com/album?id={aid}"), "html.parser")
    # The page embeds the track list as JSON inside a hidden <textarea>.
    tracks = json.loads(page.find("textarea", id="song-list-pre-data").get_text())
    music_list = []
    for track in tracks:
        music_list.append(
            {
                "order": track["no"],
                "title": track["name"],
                "mid": track["id"],
                "artists": [artist["name"] for artist in track["artists"]],
            }
        )
    return {
        "music_list": music_list,
        "aid": aid,
        "title": page.find("h2", class_="f-ff2").get_text(),
        "artists": [
            link.get_text()
            for link in page.find("p", class_="intr").find_all("a", class_="s-fc7")
        ],
    }
@retry(tries=RETRY_TIME, delay=0.5)
def search_music(*kws, limit: int = 10, offset: int = 0) -> list[dict[str, Any]]:
    """Search songs by keyword(s) via the public search API.

    Returns up to *limit* results starting at *offset*; each result dict
    has mid, title, artists, album and album_id. Empty list when the API
    response carries no songs.
    """
    query = "+".join(parse.quote(kw) for kw in kws)
    url = "https://music.163.com/api/search/get/?s={}&limit={}&type=1&offset={}".format(
        query, limit, offset
    )
    data = json.loads(get_str(url))
    # Guard: the API omits "result"/"songs" when nothing matched.
    if "result" not in data or "songs" not in data["result"]:
        return []
    return [
        {
            "mid": song["id"],
            "title": song["name"],
            "artists": [artist["name"] for artist in song["artists"]],
            "album": song["album"]["name"],
            "album_id": song["album"]["id"],
        }
        for song in data["result"]["songs"]
    ]
@retry(tries=RETRY_TIME, delay=0.5)
def get_lyrics(mid: int) -> tuple[Optional[str], Optional[str]]:
    """Fetch lyrics for song *mid*.

    Returns a tuple: the first item is the original lyrics, the second is
    the translation. Missing items are filled with None.
    """
    api = f"https://music.163.com/api/song/lyric?id={mid}&lv=-1&kv=-1&tv=-1"
    data = json.loads(get_str(api))
    if "lrc" not in data:
        # No lyrics payload at all — both placeholders.
        return None, None
    # ROBUSTNESS FIX: the API may omit the inner "lyric" key or return null
    # for it; the original indexed data["lrc"]["lyric"] and called
    # data["tlyric"]["lyric"].strip(), crashing with KeyError/AttributeError
    # in those cases. Use .get with safe defaults instead.
    lyrics = data["lrc"].get("lyric")
    raw_trans = data.get("tlyric", {}).get("lyric")
    # A blank translation counts as absent (matches the original's check).
    lyrics_trans = (raw_trans.strip() or None) if raw_trans else None
    return lyrics, lyrics_trans