python爬取相关网站一些信息-成都快上网建站

python爬取相关网站一些信息

import requests
from bs4 import BeautifulSoup

def getpage(url):
    """Fetch *url* and return the parsed BeautifulSoup document.

    Raises requests.HTTPError (via raise_for_status) on non-2xx
    responses so callers see failures instead of silently parsing
    an error page.
    """
    # Timeout prevents hanging indefinitely on an unresponsive host.
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    return BeautifulSoup(response.text, 'lxml')

def getlinks(link_url):
    """Return the href of each listing link on a Lianjia list page.

    Each listing sits inside a <div class="pic-panel"> whose first <a>
    points at the detail page.
    """
    # Reuse getpage so fetch/parse behavior (parser choice, error
    # handling) stays consistent across the module.
    soup = getpage(link_url)
    link_divs = soup.find_all('div', class_='pic-panel')
    return [div.a.get('href') for div in link_divs]
# Listing index page for Beijing rentals (used with getlinks).
url = 'https://bj.lianjia.com/zufang/'

# Detail page of a single rental listing, scraped by the demo run below.
house_url = 'https://bj.lianjia.com/zufang/101102926709.html'
def get_house_info(house_url):
    """Scrape one Lianjia rental detail page and return its fields as a dict.

    The page's <p> tags are assumed to appear in this order: area, layout,
    floor, direction, subway location, neighbourhood, listing time; the
    string slicing strips the Chinese label prefix from each value.
    NOTE(review): index/offset based parsing is fragile — it breaks whenever
    the site changes its markup; verify offsets against the live page.

    Returns a dict keyed by Chinese field names (面积, 分布, 楼层, ...).
    """
    soup = getpage(house_url)
    price = soup.find('span', class_='total').text
    unit = soup.find('span', class_='unit').text.strip()
    house_info = soup.find_all('p')
    return {
        '面积': house_info[0].text[3:],
        '分布': house_info[1].text[5:],
        '楼层': house_info[2].text[3:],
        '方向': house_info[3].text[5:],
        '价格': price,
        '单价': unit,
        '地铁': house_info[4].text[3:],
        '小区': house_info[5].text[3:7],
        '时间': house_info[6].text[3:],
    }
if __name__ == '__main__':
    # Guard the demo scrape so importing this module does not fire a
    # network request as a side effect.
    house = get_house_info(house_url)
    for key, value in house.items():
        print('{}:{}'.format(key, value))

分享文章:python爬取相关网站一些信息
网站网址:http://kswjz.com/article/jhgcjh.html
扫二维码与项目经理沟通

我们在微信上24小时期待你的声音

解答本文疑问/技术咨询/运营咨询/技术建议/互联网交流