commit b0d3a9d4fb345dd9ea4a4bfd05746e0f414fff87 Author: Julius Freudenberger Date: Mon May 1 11:16:10 2023 +0200 Initial commit diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..39d6292 --- /dev/null +++ b/.gitignore @@ -0,0 +1,293 @@ +# Created by https://www.toptal.com/developers/gitignore/api/python,pycharm +# Edit at https://www.toptal.com/developers/gitignore?templates=python,pycharm + +urls.txt + +### PyCharm ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +.idea/ + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# AWS User-specific +.idea/**/aws.xml + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# SonarLint plugin +.idea/sonarlint/ + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### PyCharm Patch ### +# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 + +# *.iml +# modules.xml +# .idea/misc.xml +# *.ipr + +# Sonarlint plugin +# https://plugins.jetbrains.com/plugin/7973-sonarlint +.idea/**/sonarlint/ + +# SonarQube Plugin +# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin +.idea/**/sonarIssues.xml + +# Markdown Navigator plugin +# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced +.idea/**/markdown-navigator.xml +.idea/**/markdown-navigator-enh.xml +.idea/**/markdown-navigator/ + +# Cache file creation bug +# See https://youtrack.jetbrains.com/issue/JBR-2257 +.idea/$CACHE_FILE$ + +# CodeStream plugin +# https://plugins.jetbrains.com/plugin/12206-codestream +.idea/codestream.xml + +# Azure Toolkit for IntelliJ plugin +# https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij +.idea/**/azureSettings.xml + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ 
+.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. 
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+### Python Patch ###
+# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
+poetry.toml
+
+# ruff
+.ruff_cache/
+
+# LSP config files
+pyrightconfig.json
+
+# End of https://www.toptal.com/developers/gitignore/api/python,pycharm
\ No newline at end of file
diff --git a/csv_exporter.py b/csv_exporter.py
new file mode 100644
index 0000000..4419e50
--- /dev/null
+++ b/csv_exporter.py
@@ -0,0 +1,17 @@
+from tripadvisor_attraction import TripadvisorAttraction, TripadvisorReview
+
+# '^'-separated header row; field order matches line_for_review() below.
+TABLE_HEADING = "username^review_title^review_text^posting_date^count_stars^count_likes^translated_by"
+
+
+def export_attraction(attraction: TripadvisorAttraction) -> None:
+    """Write all reviews of *attraction* to 'export/<title> (<total count>).csv'.
+
+    NOTE(review): the 'export/' directory must already exist, and an attraction
+    title containing '/' (or other path-hostile characters) breaks the open()
+    path -- confirm titles are safe or sanitize them here.
+    """
+    export_string: str = TABLE_HEADING + '\n'
+    for review in attraction.reviews:
+        export_string += line_for_review(review) + '\n'
+
+    # NOTE(review): writelines() with a single str iterates it character by
+    # character; write() is the conventional call (output is identical).
+    # NOTE(review): no encoding= given, so the platform default is used --
+    # review text is Unicode-heavy; consider encoding='utf-8'.
+    with open(f'export/{attraction.title} ({attraction.count_of_reviews}).csv', 'w') as export_file:
+        export_file.writelines(export_string)
+
+
+def line_for_review(review: TripadvisorReview) -> str:
+    # One '^'-separated data row for the CSV export.
+    # NOTE(review): field values are not escaped, so a '^' or newline inside
+    # review text corrupts the row -- the stdlib csv module would handle this.
+    return f'{review.username}^{review.review_title}^{review.review_text}^' \
+           f'{review.posting_date}^{review.count_stars}^{review.count_likes}^{review.translated_by}'
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..efc87c4
--- /dev/null
+++ b/main.py
@@ -0,0 +1,27 @@
+from csv_exporter import export_attraction
+from tripadvisor_attraction import TripadvisorAttraction
+from tripadvisor_parser import TripadvisorAttractionParser
+
+
+def parse_locations(urls: list[str]) -> list[TripadvisorAttraction]:
+    """Parse one TripadvisorAttraction per URL, preserving input order."""
+    attractions = []
+    for url in urls:
+        attractions.append(parse_location(url))
+
+    return attractions
+
+
+def parse_location(url: str) -> TripadvisorAttraction:
+    """Fetch and parse a single attraction page; surrounding whitespace
+    (e.g. the trailing newline from readlines()) is stripped from the URL."""
+    parser = TripadvisorAttractionParser(url.strip())
+    return parser.parse()
+
+
+def get_urls() -> list[str]:
+    """Read attraction URLs, one per line, from urls.txt.
+    Lines keep their trailing newline; parse_location() strips it."""
+    with open("urls.txt", "r") as urls_file:
+        return urls_file.readlines()
+
+
+if __name__ == "__main__":
+    # Scrape every listed attraction, then export each to its own CSV file.
+    attractions = parse_locations(get_urls())
+    for attraction in attractions:
+        export_attraction(attraction)
diff --git a/tripadvisor_attraction.py b/tripadvisor_attraction.py
new file mode 100644
index 0000000..b4b9d8b
--- /dev/null
+++ b/tripadvisor_attraction.py
@@ -0,0 +1,20 @@
+class TripadvisorReview:
+    # Plain data holder; all fields are assigned field-by-field by
+    # TripadvisorAttractionReviewParser.parse_review() (no __init__).
+    posting_date: str    # date text before " •" in the date line, or "" when absent
+    username: str
+    review_title: str
+    review_text: str
+    count_stars: float   # parsed from the rating aria-label; ',' decimal normalized to '.'
+    count_likes: int     # integer parsed from the "biGQs _P FwFXZ" span (presumably helpful votes -- confirm)
+    translated_by: str   # img alt text of the translation hint, or "" when not translated
+
+    def __str__(self):
+        return f'{self.posting_date}: {self.review_title} - {self.review_text}'
+
+
+class TripadvisorAttraction:
+    # Plain data holder filled by TripadvisorAttractionParser.parse().
+    title: str
+    count_of_reviews: int    # total review count advertised on the page (may exceed len(reviews))
+    reviews: list[TripadvisorReview]
+
+    def __str__(self):
+        return f'{self.title} ({len(self.reviews)} parsed of {self.count_of_reviews} total reviews)'
diff --git a/tripadvisor_parser.py b/tripadvisor_parser.py
new file mode 100644
index 0000000..ce78b33
--- /dev/null
+++ b/tripadvisor_parser.py
@@ -0,0 +1,132 @@
+import math
+
+import requests
+from bs4 import BeautifulSoup, Tag
+
+from tripadvisor_attraction import TripadvisorAttraction, TripadvisorReview
+
+# Desktop Firefox user agent sent with every request -- presumably to avoid
+# being served bot/blocked markup; TODO confirm it is still required.
+USERAGENT = "Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/112.0"
+HEADERS = {"User-Agent": USERAGENT}
+
+
+class ReviewsPageInformation:
+    # Pagination parameters of an attraction's review listing.
+    page_size: int   # reviews shown per page
+    page_count: int  # total number of pages (ceil(total reviews / page_size))
+
+    def __init__(self, page_size, page_count):
+        self.page_size = page_size
+        self.page_count = page_count
+
+
+class TripadvisorAttractionReviewParser:
+    # Downloads every paginated review page of one attraction and parses the
+    # review cards. Pipeline: construct_reviews_urls -> request_reviews ->
+    # soup_reviews -> parse_review per card.
+    reviews_page_information: ReviewsPageInformation
+    base_url: str
+    review_urls: list[str]            # one URL per result page
+    raw_reviews: list[str]            # raw HTML, parallel to review_urls
+    review_soups: list[BeautifulSoup] # parsed HTML, parallel to raw_reviews
+
+    def __init__(self, reviews_page_information: ReviewsPageInformation, base_url: str):
+        self.reviews_page_information = reviews_page_information
+        self.base_url = base_url
+        self.review_urls = []
+        self.raw_reviews = []
+        self.review_soups = []
+
+    def parse_reviews(self) -> list[TripadvisorReview]:
+        """Run the full pipeline and return all reviews found on all pages."""
+        print("Constructing Review URLs")
+        self.construct_reviews_urls()
+        print("Requesting Reviews")
+        self.request_reviews()
+        print("Souping Reviews")
+        self.soup_reviews()
+
+        reviews = []
+        print("Parsing Reviews")
+        for review_soup in self.review_soups:
+            # Review cards are tagged with data-automation="reviewCard".
+            review_cards = review_soup.find_all("div", attrs={"data-automation": "reviewCard"})
+            parsed_review_cards = self.parse_review_card(review_cards)
+            reviews.extend(parsed_review_cards)
+
+        return reviews
+
+    def construct_reviews_urls(self):
+        """Build one URL per result page.
+
+        Tripadvisor paginates by inserting '-or<offset>' after '-Reviews' in
+        the path; the base URL (offset 0) is page one, then offsets step by
+        page_size up to page_size * (page_count - 1).
+        NOTE(review): raises ValueError if base_url lacks '-Reviews-'.
+        """
+        [prefix, suffix] = self.base_url.split("-Reviews-")
+        self.review_urls.append(self.base_url)
+        for review_index in range(self.reviews_page_information.page_size,
+                                  self.reviews_page_information.page_size * self.reviews_page_information.page_count,
+                                  self.reviews_page_information.page_size):
+            self.review_urls.append(f'{prefix}-Reviews-or{review_index}-{suffix}')
+
+    def request_reviews(self):
+        # Sequential GET of every page; responses are kept as raw HTML text.
+        # NOTE(review): no status-code check or retry -- an error page would be
+        # souped like any other and silently yield zero review cards.
+        for review_url in self.review_urls:
+            self.raw_reviews.append(requests.get(review_url, headers=HEADERS).text)
+
+    def soup_reviews(self):
+        # Parse each downloaded page with the stdlib html.parser backend.
+        for review in self.raw_reviews:
+            self.review_soups.append(BeautifulSoup(review, features="html.parser"))
+
+    def parse_review_card(self, soups: list[Tag]) -> list[TripadvisorReview]:
+        """Parse a list of review-card tags into TripadvisorReview objects."""
+        reviews = []
+        for soup in soups:
+            reviews.append(self.parse_review(soup))
+        return reviews
+
+    def parse_review(self, soup: Tag) -> TripadvisorReview:
+        """Extract one TripadvisorReview from a review-card tag.
+
+        NOTE(review): relies on obfuscated, auto-generated CSS class names
+        ('yCeTE', 'RpeCd', 'ukgoS', ...) that change whenever Tripadvisor
+        redeploys -- expect this to break and need re-inspection.
+        NOTE(review): soup.find(...) returns None when a selector misses;
+        the username/stars/likes lines would then raise AttributeError
+        (only the date and translation hint are None-guarded).
+        """
+        # Title and body share class 'yCeTE'; exactly two matches are assumed
+        # (the destructuring raises ValueError otherwise).
+        [review_title, review_text] = soup.find_all("span", class_="yCeTE")
+        review = TripadvisorReview()
+        review.review_title = review_title.text
+        review.review_text = review_text.text
+        posting_date_tag = soup.find("div", class_="RpeCd")
+        if posting_date_tag is not None:
+            # Date line looks like "<date> • <category>"; keep the date part.
+            review.posting_date = posting_date_tag.text.split(" •")[0]
+        else:
+            review.posting_date = ""
+        review.username = soup.find("a", class_="ukgoS").text
+        # aria-label starts with the star value, e.g. "4,5 ..."; take the first
+        # token and normalize the decimal comma.
+        review.count_stars = float(soup.find("svg", class_="H0").get("aria-label").split(" ")[0].replace(',', '.'))
+        review.count_likes = int(soup.find("span", class_="biGQs _P FwFXZ").text)
+        translation_hint = soup.find("span", class_="Ne d Vm")
+        if translation_hint is not None:
+            # The hint's <img> alt text names the translation provider.
+            review.translated_by = translation_hint.img.get("alt")
+        else:
+            review.translated_by = ""
+        return review
+
+
+class TripadvisorAttractionParser:
+    # Fetches one attraction page and extracts title, total review count and
+    # pagination info, then delegates review scraping to
+    # TripadvisorAttractionReviewParser.
+    url: str
+    soup: BeautifulSoup
+
+    def __init__(self, url):
+        """Download the attraction page at *url* and soup it.
+
+        NOTE(review): a non-200 response is only printed, not raised -- parsing
+        continues on the error page and will likely fail later with
+        AttributeError from a missed selector.
+        """
+        self.url = url
+        response = requests.get(url, headers=HEADERS)
+        if response.status_code != 200:
+            print("No status code 200")
+
+        html = response.text
+        self.soup = BeautifulSoup(html, features="html.parser")
+
+    def parse(self) -> TripadvisorAttraction:
+        """Assemble the full TripadvisorAttraction (title, count, all reviews)."""
+        attraction = TripadvisorAttraction()
+        attraction.title = self.parse_title()
+        attraction.count_of_reviews = self.parse_count_of_reviews()
+        attraction.reviews = self.parse_reviews()
+
+        return attraction
+
+    def parse_title(self) -> str:
+        # The page's main heading is tagged data-automation="mainH1".
+        return self.soup.find("h1", attrs={"data-automation": "mainH1"}).text
+
+    def parse_count_of_reviews(self) -> int:
+        # Count span text looks like "<number>\xa0reviews"; split on the
+        # non-breaking space and strip ',' / '.' thousands separators.
+        return int(
+            self.soup.find("span", class_="KAVFZ").text.split("\xa0", maxsplit=2)[0].replace(',', '').replace('.', ''))
+
+    def parse_reviews(self) -> list[TripadvisorReview]:
+        """Derive pagination info from the page, then scrape all review pages.
+
+        NOTE(review): the pagination text in div.Ci is split positionally
+        (tokens [1] and [3] are the first/last review index shown); the
+        chained .split(" ")[..].split(" ")[0] is redundant as written --
+        presumably one of the delimiters was originally a non-breaking space;
+        verify against the live markup.
+        """
+        pagination_info = self.soup.find("div", class_="Ci").text
+        first_review_index = int(pagination_info.split(" ")[1].split(" ")[0])
+        last_review_index = int(pagination_info.split(" ")[3].split(" ")[0])
+        # Total count is taken from the reviews button's aria-label "(N)" and
+        # '.' thousands separators are stripped. NOTE(review): another long,
+        # obfuscated class-name selector -- brittle against site redeploys.
+        total_review_count = int(self.soup.find("button", class_="OKHdJ z Pc PQ Pp PD W _S Gn Z B2 BF _M PQFNM wSSLS").get("aria-label").split('(')[-1].split(')')[0].replace('.', ''))
+        # Page size is inferred from the visible index range, e.g. 1..10 -> 10.
+        page_size = last_review_index - first_review_index + 1
+        reviews_page_information = ReviewsPageInformation(page_size,
+                                                         page_count=math.ceil(total_review_count / page_size))
+
+        return TripadvisorAttractionReviewParser(reviews_page_information, self.url).parse_reviews()