
Scraping some information from a website with Python

The script below uses requests and BeautifulSoup to fetch Lianjia rental pages (https://bj.lianjia.com/zufang/), collect the detail-page links from a listing page, and pull the key fields out of a single listing page.

import requests
from bs4 import BeautifulSoup

def getpage(url):
    # Fetch a page and return it as a parsed BeautifulSoup object
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'lxml')
    return soup

def getlinks(link_url):
    # Collect detail-page links from a listing page:
    # each listing sits in a <div class="pic-panel"> whose <a> points to the detail page
    response = requests.get(link_url)
    format_list = BeautifulSoup(response.text, 'lxml')
    link_div = format_list.find_all('div', class_='pic-panel')
    links = [div.a.get('href') for div in link_div]
    return links

url = 'https://bj.lianjia.com/zufang/'
house_url = 'https://bj.lianjia.com/zufang/101102926709.html'

def get_house_info(house_url):

    # li = getlinks(url)
    # print(li)

    soup = getpage(house_url)
    # Price and unit live in <span class="total"> and <span class="unit">
    price = soup.find('span', class_='total').text
    unit = soup.find('span', class_='unit').text.strip()
    # The remaining details are plain <p> tags; slicing strips the Chinese field labels
    house_info = soup.find_all('p')
    area = house_info[0].text[3:]              # 面积 (area)
    layout = house_info[1].text[5:]            # 分布 (layout)
    floor = house_info[2].text[3:]             # 楼层 (floor)
    direction = house_info[3].text[5:]         # 方向 (direction)
    location = house_info[4].text[3:]          # 地铁 (subway/location)
    xiaoqu_location = house_info[5].text[3:7]  # 小区 (residential compound)
    create_time = house_info[6].text[3:]       # 时间 (listing time)
    info = {'面积': area,
            '分布': layout,
            '楼层': floor,
            '方向': direction,
            '价格': price,
            '单价': unit,
            '地铁': location,
            '小区': xiaoqu_location,
            '时间': create_time
            }
    return info

house = get_house_info(house_url)
for k, v in house.items():
    print('{}:{}'.format(k, v))
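
The getlinks helper above is only referenced in the commented-out lines inside get_house_info. A minimal sketch of how the two functions could be combined to walk one whole listing page is shown below; the scrape_listing_page name, the results list, and the time.sleep delay are assumptions added for illustration, not part of the original script.

import time

def scrape_listing_page(list_url):
    # Hypothetical helper: visit every detail link found on one listing page
    # and collect the parsed info dictionaries returned by get_house_info.
    results = []
    for link in getlinks(list_url):
        results.append(get_house_info(link))
        time.sleep(1)  # pause between requests to avoid hammering the site
    return results

# Example usage (network access required):
# houses = scrape_listing_page(url)
# for h in houses:
#     print(h)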

