Commit ddea04e

Committed Jun 23, 2016
1 parent f1c5aaa commit ddea04e

11 files changed: +353 -0 lines changed

.gitignore

+1
@@ -0,0 +1 @@
/no_use

CpuToInfluxdb.py

+53
@@ -0,0 +1,53 @@
import psutil
import os
import time
from influxdb import InfluxDBClient

# Handle to the current process (kept from the original, unused below)
p1 = psutil.Process(os.getpid())

# Connect once, outside the sampling loop
client = InfluxDBClient('localhost', 8086, 'root', 'root', 'xxyyxx')
#client.create_database('xxyyxx')

while True:
    # System-wide memory usage, in percent
    a = psutil.virtual_memory().percent

    # Total CPU usage, in percent, sampled over one second
    b = psutil.cpu_percent(interval=1.0)

    json_body = [
        {
            "measurement": "cpu_load_short",
            "tags": {
                "host": "server01",
                "region": "us-west"
            },
            #"time": "2009-11-10T23:00:00Z",
            "fields": {
                "cpu": b,
                "mem": a
            }
        }
    ]
    client.write_points(json_body)
    #result = client.query('select "cpu", "mem" from cpu_load_short;')
    #print("Result: {0}".format(result))
    time.sleep(2)
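
A minimal sketch of reading the samples back, assuming the same influxdb 1.x Python client and that the 'xxyyxx' database already exists:

from influxdb import InfluxDBClient

client = InfluxDBClient('localhost', 8086, 'root', 'root', 'xxyyxx')
result = client.query('select "cpu", "mem" from cpu_load_short;')
for point in result.get_points():  # each point carries time, cpu and mem
    print(point)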

ModifyFilename.py

+10
@@ -0,0 +1,10 @@
import os

# Truncate every filename in each first-level subdirectory to 50 characters
cwd = os.getcwd()
for entry in os.listdir(cwd):
    path = os.path.join(cwd, entry)
    if os.path.isdir(path):
        for name in os.listdir(path):
            newname = name[0:50]
            os.rename(os.path.join(path, name), os.path.join(path, newname))

countFile.py

+16
@@ -0,0 +1,16 @@
import os

result = []

def get_all(cwd):
    for entry in os.listdir(cwd):            # list the entries in this directory
        sub_path = os.path.join(cwd, entry)  # build the full path
        if os.path.isdir(sub_path):          # recurse into subdirectories
            get_all(sub_path)
        else:                                # otherwise record the file name
            result.append(os.path.basename(sub_path))


if __name__ == "__main__":
    get_all(os.getcwd())   # start from the current directory
    print(len(result))     # total number of files found
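
The same total can be computed without the manual recursion; a minimal equivalent sketch using os.walk:

import os

# Count all files below the current directory, mirroring get_all() above
total = sum(len(files) for _, _, files in os.walk(os.getcwd()))
print(total)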

countPm.py

+23
@@ -0,0 +1,23 @@
# -*- coding:utf-8 -*-
def count_pm(*args):
    # Convert the raw readings into particle concentrations
    alist = [round(i*2 - 8, 2) for i in args]
    result = []
    for pm in alist:
        result.append(generate_iso_code(abs(pm)))
    print(result)
    return result


def generate_iso_code(x):
    # Concentration thresholds; each (low, high] interval maps to an ISO 4406 code
    pm_value = [0.01, 0.02, 0.04, 0.08, 0.16, 0.32, 0.64, 1.3, 2.5, 5, 10, 20, 40, 80]
    iso = list(range(1, 25))            # ISO levels, 24 in total
    for i in range(len(pm_value) - 1):  # stop at the last interval to avoid an index error
        if pm_value[i] < x <= pm_value[i + 1]:
            return iso[i]
    return None                         # concentration falls outside the table


if __name__ == '__main__':
    count_pm(7.95, 5.85, 3.98)
    count_pm(7.918, 5.949, 5.456)
    count_pm(6.916, 3.956, 3.956)
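
A quick sanity check on the interval lookup (a worked example of my own, not from the source): a reading of 7.95 becomes round(7.95*2 - 8, 2) = 7.9, which falls in the (5, 10] interval, so its code is 10:

pm_value = [0.01, 0.02, 0.04, 0.08, 0.16, 0.32, 0.64, 1.3, 2.5, 5, 10, 20, 40, 80]
x = round(7.95*2 - 8, 2)  # 7.9
print(next(i + 1 for i in range(len(pm_value) - 1)
           if pm_value[i] < x <= pm_value[i + 1]))  # -> 10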

douban_book.py

+55
@@ -0,0 +1,55 @@
from bs4 import BeautifulSoup
import requests
from openpyxl import Workbook

excel_name = "书籍.xlsx"
wb = Workbook()
ws1 = wb.active
ws1.title = '书籍'


def get_html(url):
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0'}
    html = requests.get(url, headers=header).content
    return html


def get_con(html):
    soup = BeautifulSoup(html, 'html.parser')
    book_list = soup.find('div', attrs={'class': 'article'})
    page = soup.find('div', attrs={'class': 'paginator'})
    next_page = page.find('span', attrs={'class': 'next'}).find('a')
    name = []
    for i in book_list.find_all('table'):
        book_name = i.find('div', attrs={'class': 'pl2'})
        m = list(book_name.find('a').stripped_strings)
        # A title split across elements (main title plus subtitle) is joined back together
        if len(m) > 1:
            x = m[0] + m[1]
        else:
            x = m[0]
        name.append(x)
    if next_page:
        return name, next_page.get('href')
    else:
        return name, None


def main():
    url = 'https://book.douban.com/top250'
    name_list = []
    while url:
        html = get_html(url)
        name, url = get_con(html)
        name_list = name_list + name
    # enumerate() avoids list.index(), which misplaces duplicate titles
    for row, title in enumerate(name_list, start=1):
        ws1['A%s' % row] = title
    wb.save(filename=excel_name)


if __name__ == '__main__':
    main()

douban_movie.py

+79
@@ -0,0 +1,79 @@
#!/usr/bin/env python
# encoding=utf-8
import requests, re
from bs4 import BeautifulSoup
from openpyxl import Workbook

wb = Workbook()
dest_filename = '电影.xlsx'
ws1 = wb.active
ws1.title = "电影top250"

DOWNLOAD_URL = 'http://movie.douban.com/top250/'


def download_page(url):
    """Fetch the page content at url."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36'
    }
    data = requests.get(url, headers=headers).content
    return data


def get_li(doc):
    soup = BeautifulSoup(doc, 'html.parser')
    ol = soup.find('ol', class_='grid_view')
    name = []       # movie titles
    star_con = []   # numbers of ratings
    score = []      # scores
    info_list = []  # one-line reviews
    for i in ol.find_all('li'):
        detail = i.find('div', attrs={'class': 'hd'})
        movie_name = detail.find('span', attrs={'class': 'title'}).get_text()  # title
        level_star = i.find('span', attrs={'class': 'rating_num'}).get_text()  # score
        star = i.find('div', attrs={'class': 'star'})
        star_num = star.find(text=re.compile('评价'))  # rating count

        info = i.find('span', attrs={'class': 'inq'})  # one-line review
        if info:  # not every movie has one
            info_list.append(info.get_text())
        else:
            info_list.append('无')
        score.append(level_star)
        name.append(movie_name)
        star_con.append(star_num)
    page = soup.find('span', attrs={'class': 'next'}).find('a')  # link to the next page
    if page:
        return name, star_con, score, info_list, DOWNLOAD_URL + page['href']
    return name, star_con, score, info_list, None


def main():
    url = DOWNLOAD_URL
    name = []
    star_con = []
    score = []
    info = []
    while url:
        doc = download_page(url)
        movie, star, level_num, info_list, url = get_li(doc)
        name = name + movie
        star_con = star_con + star
        score = score + level_num
        info = info + info_list
    # enumerate() avoids list.index(), which misplaces duplicate titles
    for row, (i, m, o, p) in enumerate(zip(name, star_con, score, info), start=1):
        ws1['A%s' % row] = i
        ws1['B%s' % row] = m
        ws1['C%s' % row] = o
        ws1['D%s' % row] = p
    wb.save(filename=dest_filename)


if __name__ == '__main__':
    main()
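
A quick way to verify the export, assuming main() has run and saved '电影.xlsx' (the sheet name matches ws1.title above):

from openpyxl import load_workbook

wb = load_workbook('电影.xlsx')
ws = wb['电影top250']
print(ws.max_row)                # number of movies written
print([c.value for c in ws[1]])  # title, rating count, score and review of row 1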

excelToDatabase.py

+32
@@ -0,0 +1,32 @@
from openpyxl import load_workbook
import pymysql

config = {
    'host': '127.0.0.1',
    'port': 3306,
    'user': 'root',
    'password': 'root',
    'charset': 'utf8mb4',
    #'cursorclass': pymysql.cursors.DictCursor
}
conn = pymysql.connect(**config)
conn.autocommit(1)
cursor = conn.cursor()

name = 'lyexcel'
cursor.execute('create database if not exists %s' % name)
conn.select_db(name)
table_name = 'info'
cursor.execute('create table if not exists %s(id MEDIUMINT NOT NULL AUTO_INCREMENT,'
               'name varchar(30),tel varchar(30),primary key (id))' % table_name)

wb2 = load_workbook('hpu.xlsx')
for sheet in wb2:        # iterating a workbook yields its worksheets
    for row in sheet:    # iterating a worksheet yields tuples of cells
        # column A holds the name, column E the phone number
        value1 = (row[0].value, row[4].value)
        cursor.execute('insert into info (name,tel) values(%s,%s)', value1)

print("done")
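
A short readback to confirm the import, assuming the same local MySQL instance and the lyexcel.info table created above:

import pymysql

conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
                       password='root', db='lyexcel', charset='utf8mb4')
with conn.cursor() as cursor:
    cursor.execute('select count(*) from info')
    print(cursor.fetchone()[0])  # number of imported rows
conn.close()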

lagouSpider.py

+37
@@ -0,0 +1,37 @@
import requests
from openpyxl import Workbook


def get_json(url, page, lang_name):
    data = {'first': 'true', 'pn': page, 'kd': lang_name}
    resp = requests.post(url, data).json()
    list_con = resp['content']['positionResult']['result']
    info_list = []
    for i in list_con:
        # Keep short name, full name, salary, city and education for each post
        info = [i['companyShortName'], i['companyName'], i['salary'],
                i['city'], i['education']]
        info_list.append(info)
    return info_list


def main():
    lang_name = input('职位名:')  # job title to search for
    page = 1
    url = 'http://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false'
    info_result = []
    while page < 31:  # the listing is capped at 30 pages
        info = get_json(url, page, lang_name)
        info_result = info_result + info
        page += 1
    wb = Workbook()
    ws1 = wb.active
    ws1.title = lang_name
    for row in info_result:
        ws1.append(row)
    wb.save('职位信息.xlsx')


if __name__ == '__main__':
    main()

login_zhihu.py

+30
@@ -0,0 +1,30 @@
import requests, time
from bs4 import BeautifulSoup

url = 'https://www.zhihu.com/login/email'


def get_captcha(data):
    # Save the captcha image locally and ask the user to type it in
    with open('captcha.gif', 'wb') as fb:
        fb.write(data)
    return input('captcha: ')


def login(username, password, oncaptcha):
    sessiona = requests.Session()
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0'}
    # Fetch the sign-in page once and pull the _xsrf token out of it
    signin_page = sessiona.get('https://www.zhihu.com/#signin', headers=headers).content
    _xsrf = BeautifulSoup(signin_page, 'html.parser').find('input', attrs={'name': '_xsrf'}).get('value')
    captcha_content = sessiona.get('https://www.zhihu.com/captcha.gif?r=%d&type=login' % (time.time() * 1000), headers=headers).content
    data = {
        "_xsrf": _xsrf,
        "email": username,
        "password": password,
        "remember_me": True,
        "captcha": oncaptcha(captcha_content)
    }
    resp = sessiona.post(url, data, headers=headers).content
    return resp


if __name__ == "__main__":
    login('email', 'password', get_captcha)

readExcel.py

+17
@@ -0,0 +1,17 @@
from openpyxl import Workbook
from openpyxl.utils import get_column_letter  # no longer exported from openpyxl.cell

wb = Workbook()
dest_filename = 'empty_book2.xlsx'

ws1 = wb.active             # the first sheet
ws1.title = "range names"   # rename the first sheet
# Append the sequence 0-59 as a row, for each of rows 1 through 39
for row in range(1, 40):
    ws1.append(range(60))

ws2 = wb.create_sheet(title="Pi")
ws2['F5'] = 3.14

ws3 = wb.create_sheet(title="Data")
# Fill a block with each column's letter (columns 27-53, rows 10-19)
for row in range(10, 20):
    for col in range(27, 54):
        _ = ws3.cell(column=col, row=row, value="%s" % get_column_letter(col))
wb.save(filename=dest_filename)
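
A quick check of the column-letter helper used above: columns 27 through 53 form the AA..BA block that ws3 fills.

from openpyxl.utils import get_column_letter

print(get_column_letter(27))  # -> 'AA'
print(get_column_letter(53))  # -> 'BA'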
