# Extract the product fields from a page's HTML.
def get_page_data(html):
    """Parse a product page and return its fields as a dict.

    Args:
        html: raw HTML text of the page.

    Returns:
        dict with keys 'name', 'article', 'komplekt', 'composition',
        'characteristics'; any field not found on the page is ''.
    """
    soup = BeautifulSoup(html, 'lxml')

    def _first_text(tag):
        # Text of the first matching tag, or '' when the tag is absent.
        found = soup.find(tag)
        return found.text.strip() if found else ''

    name = _first_text('h1')
    # Bug fix: the original called .text on the ResultSet returned by
    # find_all(), which always raised AttributeError; the bare `except:`
    # swallowed it, so these three fields were silently always ''.
    # Use the first matching tag instead (presumably the intended one —
    # TODO confirm against the target site's markup).
    article = _first_text('span')
    komplekt = _first_text('strong')
    composition = _first_text('p')

    try:
        # Same bug as above for the <li> items; join all list entries.
        items = soup.find('ul').find_all('li')
        characteristics = '; '.join(li.text.strip() for li in items)
    except AttributeError:
        # No <ul> on the page.
        characteristics = ''

    data = {'name': name,
            'article': article,
            'komplekt': komplekt,
            'composition': composition,
            'characteristics': characteristics}
    print(data)
    return data
I9C30LDQsdC40YDQsNC10Lwg0YHQvtC00LXRgNC20LDQvdC40LUg0YHRgtGA0LDQvdC40YbRiwpkZWYgZ2V0X3BhZ2VfZGF0YShodG1sKToKCXNvdXAgPSBCZWF1dGlmdWxTb3VwKGh0bWwsICdseG1sJykKCgl0cnk6CgkJbmFtZSA9IHNvdXAuZmluZCgnaDEnKS50ZXh0LnN0cmlwKCkKCWV4Y2VwdDoKCQluYW1lID0gJycKCgl0cnk6CgkJYXJ0aWNsZSA9IHNvdXAuZmluZF9hbGwoJ3NwYW4nKS50ZXh0LnN0cmlwKCkKCWV4Y2VwdDoKCQlhcnRpY2xlID0gJycKCgl0cnk6CgkJa29tcGxla3QgPSBzb3VwLmZpbmRfYWxsKCdzdHJvbmcnKS50ZXh0LnN0cmlwKCkKCWV4Y2VwdDoKCQlrb21wbGVrdCA9ICcnCgoJdHJ5OgoJCWNvbXBvc2l0aW9uID0gc291cC5maW5kX2FsbCgncCcpLnRleHQuc3RyaXAoKQoJZXhjZXB0OgoJCWNvbXBvc2l0aW9uID0gJycKCgl0cnk6CgkJY2hhcmFjdGVyaXN0aWNzID0gc291cC5maW5kKCd1bCcpLmZpbmRfYWxsKCdsaScpLnRleHQuc3RyaXAoKQoJZXhjZXB0OgoJCWNoYXJhY3RlcmlzdGljcyA9ICcnCgoKCWRhdGEgPSB7J25hbWUnOiBuYW1lLAoJCQknYXJ0aWNsZSc6IGFydGljbGUsCgkJCSdrb21wbGVrdCc6IGtvbXBsZWt0LAoJCQknY29tcG9zaXRpb24nOiBjb21wb3NpdGlvbiwKCQkJJ2NoYXJhY3RlcmlzdGljcyc6IGNoYXJhY3RlcmlzdGljc30KCXByaW50KGRhdGEpCglyZXR1cm4gZGF0YQ==