import logging
from time import sleep
from urllib.parse import urljoin, urlsplit

import requests
from bs4 import BeautifulSoup

logger = logging.getLogger(__name__)


def check_for_redirect(response: requests.Response):
    """Raise an HTTPError if the request was redirected (book not found)."""
    if response.history:
        raise requests.exceptions.HTTPError('Book not found')


def extract_comments(soup: BeautifulSoup):
    """Extract the list of reader comments."""
    comments_selector = 'div.texts span.black'
    return [comment.text for comment in soup.select(comments_selector)]


def extract_genres(soup: BeautifulSoup):
    """Extract the list of genres."""
    genres_selector = 'span.d_book a'
    return [genre.text for genre in soup.select(genres_selector)]


def parse_book_page(html_content: str, book_url: str):
    """Parse a book page into a metadata dict."""
    soup = BeautifulSoup(html_content, 'lxml')
    book_name_selector = 'td.ow_px_td h1'
    # The h1 holds 'Title :: Author', so split on the separator.
    book_name = soup.select_one(book_name_selector).text.split('::')
    img_selector = 'div.bookimage img'
    img_src = soup.select_one(img_selector)['src']
    title, author = book_name
    title = title.strip()
    book_metadata = {
        'title': title,
        'author': author.strip(),
        'img_src': urljoin(book_url, str(img_src)),
        'img_filename': str(urlsplit(img_src).path.split('/')[-1]),
        'book_filename': f'{title}.txt',
        'comments': extract_comments(soup),
        'genres': extract_genres(soup),
    }
    return book_metadata
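
# A minimal usage sketch for parse_book_page. The URL below is an assumption
# (the CSS selectors above match tululu.org-style pages); substitute the
# actual book page you are parsing:
#
#     response = requests.get('https://tululu.org/b1/')  # hypothetical book URL
#     response.raise_for_status()
#     check_for_redirect(response)
#     metadata = parse_book_page(response.text, response.url)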


def extract_book_ids(soup: BeautifulSoup):
    """Extract the book ids from a catalogue page."""
    book_ids = []
    books_selector = 'body table.d_book tr:nth-child(2) a'
    for book in soup.select(books_selector):
        # hrefs look like '/b<id>/'; strip the slashes and the 'b' prefix.
        book_ids.append(book['href'].strip('/b'))
    return book_ids


def get_book_ids(genre_url: str, start_page: int, end_page: int):
    """Collect book ids from the given range of genre pages."""
    book_ids = []
    for page in range(start_page, end_page + 1):
        page_url = f'{genre_url}/{page}/'
        try:
            response = requests.get(page_url)
            response.raise_for_status()
            check_for_redirect(response)
            soup = BeautifulSoup(response.text, 'lxml')
            book_ids.extend(extract_book_ids(soup))
        except requests.exceptions.ConnectionError as connect_err:
            # Wait out a network hiccup before moving on to the next page.
            logger.error(connect_err)
            sleep(10)
        except requests.exceptions.HTTPError as http_err:
            # A redirect or HTTP error means the page is missing; skip it.
            logger.error(http_err)
            continue
    return book_ids
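

# A minimal end-to-end sketch, runnable as a script. The genre URL and book
# URL pattern are assumptions (the selectors above match tululu.org-style
# markup); swap in the genre and page range you actually want to crawl.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    genre_url = 'https://tululu.org/l55'  # hypothetical genre URL
    for book_id in get_book_ids(genre_url, start_page=1, end_page=2):
        book_url = f'https://tululu.org/b{book_id}/'  # hypothetical URL pattern
        response = requests.get(book_url)
        response.raise_for_status()
        try:
            check_for_redirect(response)
        except requests.exceptions.HTTPError as err:
            logger.error(err)
            continue
        metadata = parse_book_page(response.text, book_url)
        print(metadata['title'], '-', metadata['author'])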