Scraping the Forbes Rich List with Python and Visualizing the Data

2021/6/26 9:26:53

This article walks through scraping the Forbes rich list with Python and visualizing the data. It should be a useful reference for anyone tackling a similar programming problem; if that's you, follow along!


I. Topic Background

We now live in the era of big data; data has become indispensable, so being able to obtain it matters a great deal. The main ways of obtaining data are roughly the following:

1. Data produced by companies themselves: large internet companies have massive user bases, which gives them a natural advantage in accumulating data.

2. Data management and consulting firms.

3. Open data published by governments and institutions.

4. Data bought from third-party data platforms, or data collected yourself with a web scraper.

II. Scraper Design

1. Project name: scraping the Forbes rich list with Python and visualizing the data.

2. Data acquisition and analysis of the data's characteristics.

III. Page Structure Analysis

1. Page parsing: the ranking data on each page sits in a single <table> with class rank-table; every <tr> after the header row holds one person's rank, name (inside a <p>), wealth, company, and country (inside an <a>).
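
As a quick illustration, here is a minimal, self-contained sketch of the markup the parser assumes, with placeholder row values (the live page may of course differ), showing how BeautifulSoup pulls the five fields out of one row:

from bs4 import BeautifulSoup

# Placeholder markup mimicking the assumed structure of the ranking table (not real data).
sample_html = """
<table class="rank-table">
  <tr><th>排名</th><th>姓名</th><th>财富</th><th>企业</th><th>国家</th></tr>
  <tr>
    <td>1</td>
    <td><p>示例人物</p></td>
    <td>1000亿美元</td>
    <td>示例公司</td>
    <td><a href="#">美国</a></td>
  </tr>
</table>
"""

soup = BeautifulSoup(sample_html, "lxml")
rows = soup.find_all('table', class_="rank-table")[0].find_all('tr')[1:]   # skip the header row
for tr in rows:
    tds = tr.find_all('td')
    print(tds[0].string, tds[1].p.string, tds[2].string, tds[3].string, tds[4].a.string)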

 

 

IV. Program Design

1. Build the paginated URLs and crawl every page (the ranking spans 15 pages, shishi_1.html through shishi_15.html)

 

def loadalldata():
   alldata = []
   for i in range(1,16,1):
      url = "https://www.phb123.com/renwu/fuhao/shishi_"+str(i)+".html"
      data = loaddata(url)
      alldata = alldata + data
   return alldata

 

2. Read one page of data

def loaddata(url):
   from bs4 import BeautifulSoup
   import requests
   headers = {
       'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) '
                    'Chrome/72.0.3626.121 Safari/537.36'
   }
   f = requests.get(url,headers=headers)
   soup = BeautifulSoup(f.content, "lxml")
   # print(f.content.decode())
   ranktable = soup.find_all('table',class_="rank-table" )[0]
   trlist = ranktable.find_all('tr')
   trlist.pop(0)
   persionlist = []
   for tr in trlist:
      persion = {}
      persion['num'] = tr.find_all('td')[0].string
      persion['name'] = tr.find_all('td')[1].p.string
      persion['money'] = tr.find_all('td')[2].string
      persion['company'] = tr.find_all('td')[3].string
      persion['country'] = tr.find_all('td')[4].a.string
      persionlist.append(persion)
   print("页面"+url+"爬取成功")
   return persionlist

3. Save the scraped data to a local Excel file

 

 

 

def savedata(path,persionlist):
   import xlwt
   workbook = xlwt.Workbook()
   worksheet = workbook.add_sheet('test')
   worksheet.write(0, 0, '排名')
   worksheet.write(0, 1, '姓名')
   worksheet.write(0, 2, '财富')
   worksheet.write(0, 3, '企业')
   worksheet.write(0, 4, '国家')
   for i in range(1,len(persionlist)+1,1):
      worksheet.write(i, 0, persionlist[i-1]['num'])
      worksheet.write(i, 1, persionlist[i-1]['name'])
      worksheet.write(i, 2, persionlist[i-1]['money'])
      worksheet.write(i, 3, persionlist[i-1]['company'])
      worksheet.write(i, 4, persionlist[i-1]['country'])
   workbook.save(path)
   print("数据保存成功:"+path)

4. Extract the top-ten names and wealth figures (returned as two lists), count the people per country, and draw the charts

def loadtop10(path):
    import xlrd
    book = xlrd.open_workbook(path)
    sheet1 = book.sheets()[0]
    namelist = sheet1.col_values(1)
    moneylist = sheet1.col_values(2)
    namelist = namelist[1:11]
    moneylist = moneylist[1:11]
 
    moneylist2 = []
    for a in moneylist:
        a = int(a[0:-3])   # strip the trailing "亿美元" unit so the value can be cast to int
        moneylist2.append(a)
    print("取出排行榜前十的姓名和财富数据")
    print(namelist)
    print(moneylist2)
    return namelist,moneylist2
def countcountrynum(path):
   import xlrd
   book = xlrd.open_workbook(path)
   sheet1 = book.sheets()[0]
   countrylist = sheet1.col_values(4)[1:]   # skip the header row, keep every data row
   print(countrylist)
   countryset = list(set(countrylist))
   dictlist = []
   for country in countryset:
      obj = {"name":country,"count":0}
      dictlist.append(obj)
   for obj in dictlist:
      for a in countrylist:
         if obj['name'] == a:
            obj['count'] = obj['count'] + 1
   print(dictlist)
   for i in range(0,len(dictlist),1):
      for j in range(0,len(dictlist)-i-1,1):
          if dictlist[j]['count'] < dictlist[j+1]['count']:
             temp = dictlist[j]
             dictlist[j] = dictlist[j+1]
             dictlist[j+1] = temp
   dictlist2 = dictlist[0:5]
   set2 = []
   for a in dictlist2:
      set2.append(a['name'])
   othercount = 0
   for a in dictlist:
      if a['name'] not in set2:
         othercount = othercount + a['count']   # sum the people, not the number of countries
   dictlist2.append({"name":"其他","count":othercount})
   print('获取排行榜中每个国家的上榜人数')
   print(dictlist2)
   return dictlist2
def drow():
   import matplotlib.pyplot as plt
   plt.rcParams['font.sans-serif'] = ['SimHei']
   plt.figure('福布斯前十榜',figsize=(15,5))
   listx,listy = loadtop10('rank.xls')
   plt.title('福布斯前十榜', fontsize=16)
   plt.xlabel('人物', fontsize=14)
   plt.ylabel('金额/亿美元', fontsize=14)
   plt.tick_params(labelsize=10)
   plt.grid(linestyle=':', axis='y')
   a = plt.bar(listx, listy, color='dodgerblue', align='center')
   for i in a:
      h = i.get_height()
      plt.text(i.get_x() + i.get_width() / 2, h, '%d' % int(h), ha='center', va='bottom')
   dictlist = countcountrynum("rank.xls")
   plt.figure('各国家上榜人数所占比例')
   labels = []
   sizes = []
   for a in dictlist:
      labels.append(a['name'])
      sizes.append(a['count'])
   explode = (0.1, 0, 0, 0, 0, 0)
   plt.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=False, startangle=150)
   plt.title("各国家上榜人数所占比例", fontsize=16)
   plt.axis('equal')
 
   plt.show()

5. Complete code

 

## Read one page of data
def loaddata(url):
   from bs4 import BeautifulSoup
   import requests
   headers = {
       'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) '
                    'Chrome/72.0.3626.121 Safari/537.36'
   }
   f = requests.get(url,headers=headers)   # GET the page to obtain its HTML
   soup = BeautifulSoup(f.content, "lxml")  # parse the page with the lxml parser (f.text would also return the HTML)
   # print(f.content.decode())        # print the page content to check that the request succeeded
   ranktable = soup.find_all('table',class_="rank-table" )[0]   # locate the ranking table
   trlist = ranktable.find_all('tr') # all tr tags in the table
   trlist.pop(0) # drop the header row
   persionlist = []
   for tr in trlist:
      persion = {}
      persion['num'] = tr.find_all('td')[0].string  # rank
      persion['name'] = tr.find_all('td')[1].p.string # name
      persion['money'] = tr.find_all('td')[2].string # wealth
      persion['company'] = tr.find_all('td')[3].string # company
      persion['country'] = tr.find_all('td')[4].a.string # country
      persionlist.append(persion)
   print("页面"+url+"爬取成功")
   return persionlist
 
 
## Read every page of the Forbes ranking
def loadalldata():
   alldata = []
   for i in range(1,16,1):
      url = "https://www.phb123.com/renwu/fuhao/shishi_"+str(i)+".html"
      data = loaddata(url)
      alldata = alldata + data
   return alldata
 
## Save the scraped data to a file
def savedata(path,persionlist):
   import xlwt
   workbook = xlwt.Workbook()
   worksheet = workbook.add_sheet('test')
   worksheet.write(0, 0, '排名')
   worksheet.write(0, 1, '姓名')
   worksheet.write(0, 2, '财富')
   worksheet.write(0, 3, '企业')
   worksheet.write(0, 4, '国家')
   for i in range(1,len(persionlist)+1,1):
      worksheet.write(i, 0, persionlist[i-1]['num'])
      worksheet.write(i, 1, persionlist[i-1]['name'])
      worksheet.write(i, 2, persionlist[i-1]['money'])
      worksheet.write(i, 3, persionlist[i-1]['company'])
      worksheet.write(i, 4, persionlist[i-1]['country'])
   workbook.save(path)
   print("数据保存成功:"+path)
 
## Extract the top-ten names and wealth figures, returned as two lists
def loadtop10(path):
    import xlrd
    book = xlrd.open_workbook(path)
    sheet1 = book.sheets()[0]
    namelist = sheet1.col_values(1)
    moneylist = sheet1.col_values(2)
    namelist = namelist[1:11]
    moneylist = moneylist[1:11]
 
    moneylist2 = []
    for a in moneylist:
        a = int(a[0:-3])   # strip the trailing "亿美元" unit so the value can be cast to int
        moneylist2.append(a)
    print("取出排行榜前十的姓名和财富数据")
    print(namelist)
    print(moneylist2)
    return namelist,moneylist2
 
## Count how many people from each country are on the list, returned as a list of dicts
def countcountrynum(path):
   import xlrd
   book = xlrd.open_workbook(path)
   sheet1 = book.sheets()[0]
   countrylist = sheet1.col_values(4)[1:]   # skip the header row, keep every data row
   print(countrylist)
   countryset = list(set(countrylist))
   dictlist = []
   for country in countryset:
      obj = {"name":country,"count":0}
      dictlist.append(obj)
   ## tally the number of people for each country
   for obj in dictlist:
      for a in countrylist:
         if obj['name'] == a:
            obj['count'] = obj['count'] + 1
   print(dictlist)
   ## sort dictlist so the largest counts come first (simple bubble sort)
   for i in range(0,len(dictlist),1):
      for j in range(0,len(dictlist)-i-1,1):
          if dictlist[j]['count'] < dictlist[j+1]['count']:
             temp = dictlist[j]
             dictlist[j] = dictlist[j+1]
             dictlist[j+1] = temp
   dictlist2 = dictlist[0:5]
   set2 = []
   for a in dictlist2:
      set2.append(a['name'])
   othercount = 0
   for a in dictlist:
      if a['name'] not in set2:
         othercount = othercount + a['count']   # sum the people, not the number of countries
   dictlist2.append({"name":"其他","count":othercount})
   print('获取排行榜中每个国家的上榜人数')
   print(dictlist2)
   return dictlist2
 
## Draw the bar chart and the pie chart
def drow():
   import matplotlib.pyplot as plt
   plt.rcParams['font.sans-serif'] = ['SimHei'] # use a font that can render the Chinese labels
   plt.figure('福布斯前十榜',figsize=(15,5))
 
   ## load the top-ten names and wealth figures
   listx,listy = loadtop10('rank.xls')
 
   plt.title('福布斯前十榜', fontsize=16)
   plt.xlabel('人物', fontsize=14)
   plt.ylabel('金额/亿美元', fontsize=14)
   plt.tick_params(labelsize=10)
   plt.grid(linestyle=':', axis='y')
   a = plt.bar(listx, listy, color='dodgerblue', align='center')
   # add the value label above each bar
   for i in a:
      h = i.get_height()
      plt.text(i.get_x() + i.get_width() / 2, h, '%d' % int(h), ha='center', va='bottom')
   ## -------------------------------------------------------------------------
   dictlist = countcountrynum("rank.xls")
   plt.figure('各国家上榜人数所占比例')
   labels = []
   sizes = []
   for a in dictlist:
      labels.append(a['name'])
      sizes.append(a['count'])
   explode = (0.1, 0, 0, 0, 0, 0)
   plt.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=False, startangle=150)
   plt.title("各国家上榜人数所占比例", fontsize=16)
   plt.axis('equal')  # keep the pie chart circular
 
   plt.show()
 
if __name__ == '__main__':
 
   ## scrape the data
   data = loadalldata()
   ## save the data
   savedata("rank.xls",data)    # creates rank.xls next to the .py file
   ## visualize the data
   drow()

V. Results

1. The local Excel file containing the scraped data
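
As a quick sanity check (an optional sketch, using the same xlrd library as above), the saved rank.xls can be read back and its first few rows printed:

import xlrd

# Read the workbook produced by savedata() and print the header plus the first rows.
book = xlrd.open_workbook('rank.xls')
sheet = book.sheets()[0]
for r in range(min(6, sheet.nrows)):
    print(sheet.row_values(r))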

 

2. Visualization of the top ten people on the Forbes list

 

 

3. Each country's share of the people on the list, counted and visualized

 

 

VI. Project Summary

1. Conclusions

The pie chart shows that the United States has the most people on the list, with China close behind, followed by Germany, Russia, India, and other countries. The bar chart shows that most of the top ten on the Forbes list are from the United States; only the third and tenth places are held by French nationals.

 

2. Takeaways

Designing and carrying out this scraping project taught me how to scrape information from a website and analyze the data I obtained. Because I was still unfamiliar with this kind of project I ran into obstacles everywhere, so there is a lot more to learn and plenty of room for improvement.

 




