X-Git-Url: http://git.hcoop.net/clinton/thingy_grabber.git/blobdiff_plain/0930777e59ed9b06856e06a31de0ee9ff9a14993..247c2cd5969bf12e949b03402f90b9e40b322324:/thingy_grabber.py

diff --git a/thingy_grabber.py b/thingy_grabber.py
index 1005d7c..02ea355 100755
--- a/thingy_grabber.py
+++ b/thingy_grabber.py
@@ -12,8 +12,17 @@ import requests
 import logging
 import multiprocessing
 import enum
+import datetime
 from shutil import copyfile
 from bs4 import BeautifulSoup
+from dataclasses import dataclass
+import selenium
+from selenium import webdriver
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.firefox.options import Options
+import atexit
 
 URL_BASE = "https://www.thingiverse.com"
 URL_COLLECTION = URL_BASE + "/ajax/thingcollection/list_collected_things"
@@ -29,7 +38,25 @@ NO_WHITESPACE_REGEX = re.compile(r'[-\s]+')
 
 DOWNLOADER_COUNT = 1
 RETRY_COUNT = 3
-VERSION = "0.7.0"
+MAX_PATH_LENGTH = 250
+
+VERSION = "0.8.6"
+
+
+#BROWSER = webdriver.PhantomJS('./phantomjs')
+options = Options()
+options.add_argument("--headless")
+BROWSER = webdriver.Firefox(options=options)
+
+BROWSER.set_window_size(1980, 1080)
+
+
+@dataclass
+class FileLink:
+    name: str
+    last_update: datetime.datetime
+    link: str
+
 
 class State(enum.Enum):
     OK = enum.auto()
@@ -37,6 +64,33 @@ class State(enum.Enum):
     ALREADY_DOWNLOADED = enum.auto()
 
 
+def fail_dir(dir_name):
+    """ When a download has failed, move it sideways.
+    """
+    target_dir = "{}_failed".format(dir_name)
+    inc = 0
+    while os.path.exists(target_dir):
+        target_dir = "{}_failed_{}".format(dir_name, inc)
+        inc += 1
+    os.rename(dir_name, target_dir)
+
+
+def truncate_name(file_name):
+    """ Ensure the filename is not too long (mainly a Windows limitation).
+    """
+    path = os.path.abspath(file_name)
+    if len(path) <= MAX_PATH_LENGTH:
+        return path
+    base, extension = os.path.splitext(path)
+    base = base[:MAX_PATH_LENGTH - len(extension) - 3]
+    inc = 0
+    new_path = "{}_{}{}".format(base, inc, extension)
+    while os.path.exists(new_path):
+        inc += 1
+        new_path = "{}_{}{}".format(base, inc, extension)
+    return new_path
+
+
 def strip_ws(value):
     """ Remove whitespace from a string """
     return str(NO_WHITESPACE_REGEX.sub('-', value))
@@ -44,15 +98,63 @@ def strip_ws(value):
 
 def slugify(value):
     """
-    Normalizes string, converts to lowercase, removes non-alpha characters,
-    and converts spaces to hyphens.
+    Normalise the string: remove characters that are invalid in
+    filenames and convert it to lowercase.
     """
-    value = unicodedata.normalize('NFKD', value).encode(
-        'ascii', 'ignore').decode()
-    value = str(re.sub(r'[^\w\s-]', '', value).strip())
-    value = str(NO_WHITESPACE_REGEX.sub('-', value))
+    value = unicodedata.normalize('NFKC', value).lower().strip()
+    value = re.sub(r'[\\/<>:\?\*\|"]', '', value)
+    value = re.sub(r'\.*$', '', value)
     return value
 
+class PageChecker(object):
+    def __init__(self):
+        self.log = []
+        self.title = None
+        self.file_count = None
+        self.files = None
+        self.images = None
+        self.license = None
+
+
+    def __call__(self, _):
+        try:
+            self.log.append("call")
+            if self.title is None:
+                # first find the name
+                name = EC._find_element(BROWSER, (By.CSS_SELECTOR, "[class^=ThingPage__modelName]"))
+                if name is None:
+                    return False
+                self.title = name.text
+
+            if self.file_count is None:
+                # OK. Do we know how many files we have to download?
+                metrics = EC._find_elements(BROWSER, (By.CSS_SELECTOR, "[class^=MetricButton]"))
+                self.log.append("got some metrics: {}".format(len(metrics)))
+                cur_count = int([x.text.split("\n")[0] for x in metrics if x.text.endswith("\nThing Files")][0])
+                self.log.append(cur_count)
+                if cur_count == 0:
+                    return False
+                self.file_count = cur_count
+
+            self.log.append("looking for {} files".format(self.file_count))
+            fileRows = EC._find_elements(BROWSER, (By.CSS_SELECTOR, "[class^=ThingFile__fileRow]"))
+            self.log.append("found {} files".format(len(fileRows)))
+            if len(fileRows) < self.file_count:
+                return False
+
+            self.log.append("Looking for images")
+            # By this point we _should_ have loaded all the images.
+            self.images = EC._find_elements(BROWSER, (By.CSS_SELECTOR, "[class^=thumb]"))
+            self.license = EC._find_element(BROWSER, (By.CSS_SELECTOR, "[class^=License__licenseText]")).text
+            self.log.append("found {} images".format(len(self.images)))
+            self.files = fileRows
+            return True
+        except Exception:
+            return False
+
+
+
 class Downloader(multiprocessing.Process):
     """
     Class to handle downloading the things we have found to get.
@@ -164,7 +266,7 @@ class Grouping:
                           .format(self.download_dir))
         logging.info("Downloading {} thing(s).".format(self.total))
         for idx, thing in enumerate(self.things):
-            logging.info("Downloading thing {}".format(idx))
+            logging.info("Downloading thing {} - {}".format(idx, thing))
             RC = Thing(thing).download(self.download_dir)
             if self.quick and RC==State.ALREADY_DOWNLOADED:
                 logging.info("Caught up, stopping.")
@@ -216,42 +318,58 @@ class Thing:
         url = "{}/thing:{}/files".format(URL_BASE, self.thing_id)
 
         try:
-            req = requests.get(url)
+            BROWSER.get(url)
+            wait = WebDriverWait(BROWSER, 60)
+            pc = PageChecker()
+            wait.until(pc)
         except requests.exceptions.ConnectionError as error:
             logging.error("Unable to connect for thing {}: {}".format(
                 self.thing_id, error))
             return
+        except selenium.common.exceptions.TimeoutException:
+            logging.error(pc.log)
+            logging.error("Timeout trying to parse thing {}".format(self.thing_id))
+            return
 
-        self.text = req.text
-        soup = BeautifulSoup(self.text, features='lxml')
-        #import code
-        #code.interact(local=dict(globals(), **locals()))
-        try:
-            self.title = slugify(soup.find_all('h1')[0].text.strip())
-        except IndexError:
-            logging.warning(
-                "No title found for thing {}".format(self.thing_id))
-            self.title = self.thing_id
+        self.title = pc.title
+        self._file_links = []
+        if not pc.files:
+            logging.error("No files found for thing {} - probably Thingiverse being broken, try again later".format(self.thing_id))
+        for link in pc.files:
+            logging.debug("Parsing link: {}".format(link.text))
+            link_link = link.find_element_by_xpath(".//a").get_attribute("href")
+            if link_link.endswith("/zip"):
+                # Bulk download link - skip it.
+                continue
+            try:
+                link_title, link_details, _ = link.text.split("\n")
+            except ValueError:
+                # If it is a filetype that doesn't generate a picture, then we get an extra field at the start.
+                _, link_title, link_details, _ = link.text.split("\n")
+
+            # link_details will be something like '461 kb | Updated 06-11-2019 | 373 Downloads';
+            # the date needs converting from M-D-Y to Y-M-D.
+            link_date = [int(x) for x in link_details.split("|")[1].split()[-1].split("-")]
+            try:
+                self._file_links.append(FileLink(link_title, datetime.datetime(link_date[2], link_date[0], link_date[1]), link_link))
+            except ValueError:
+                logging.error(link_date)
 
-        if req.status_code == 404:
-            logging.warning(
-                "404 for thing {} - DMCA or invalid number?".format(self.thing_id))
-            return
+        self._image_links = [x.find_element_by_xpath(".//img").get_attribute("src") for x in pc.images]
+        self._license = pc.license
+        self.pc = pc
 
-        if req.status_code > 299:
-            logging.warning(
-                "bad status code {} for thing {} - try again later?".format(req.status_code, self.thing_id))
-            return
 
-        self.old_download_dir = os.path.join(base_dir, self.title)
-        self.download_dir = os.path.join(base_dir, "{} - {}".format(self.thing_id, self.title))
+        self.old_download_dir = os.path.join(base_dir, slugify(self.title))
+        self.download_dir = os.path.join(base_dir, "{} - {}".format(self.thing_id, slugify(self.title)))
 
         logging.debug("Parsing {} ({})".format(self.thing_id, self.title))
 
         if not os.path.exists(self.download_dir):
+            logging.info("Looking for old dir at {}".format(self.old_download_dir))
             if os.path.exists(self.old_download_dir):
-                logging.info("Found previous style download directory. Moving it")
-                copyfile(self.old_download_dir, self.download_dir)
+                logging.warning("Found previous style download directory. Moving it from {} to {}".format(self.old_download_dir, self.download_dir))
+                os.rename(self.old_download_dir, self.download_dir)
             else:
                 # Not yet downloaded
                 self._parsed = True
@@ -267,28 +385,34 @@ class Thing:
 
         try:
             with open(timestamp_file, 'r') as timestamp_handle:
-                self.last_time = timestamp_handle.readlines()[0]
+                # The .split(' ')[0] drops the time-of-day part of old style timestamps.
+                last_bits = [int(x) for x in timestamp_handle.readlines()[0].split(' ')[0].split("-")]
+                logging.warning(last_bits)
+                try:
+                    self.last_time = datetime.datetime(last_bits[0], last_bits[1], last_bits[2])
+                except ValueError:
+                    # This one appears to be M D Y.
+                    self.last_time = datetime.datetime(last_bits[2], last_bits[0], last_bits[1])
+
             logging.info("last downloaded version: {}".format(self.last_time))
         except FileNotFoundError:
             # Not run on this thing before.
             logging.info(
                 "Old-style download directory found. Assuming update required.")
             self.last_time = None
+            self._needs_download = True
             self._parsed = True
             return
 
         # OK, so we have a timestamp, lets see if there is anything new to get
-        file_links = soup.find_all('a', {'class': 'file-download'})
-        for file_link in file_links:
-            timestamp = file_link.find_all('time')[0]['datetime']
-            logging.debug("Checking {} (updated {})".format(
-                file_link["title"], timestamp))
-            if timestamp > self.last_time:
+        for file_link in self._file_links:
+            if file_link.last_update > self.last_time:
                 logging.info(
-                    "Found new/updated file {}".format(file_link["title"]))
+                    "Found new/updated file {} - {}".format(file_link.name, file_link.last_update))
                 self._needs_download = True
                 self._parsed = True
                 return
+
         # Got here, so nope, no new files.
         self._needs_download = False
         self._parsed = True
@@ -309,14 +433,17 @@ class Thing:
             print("{} - {} already downloaded - skipping.".format(self.thing_id, self.title))
             return State.ALREADY_DOWNLOADED
 
+        if not self._file_links:
+            print("{} - {} appears to have no files. Thingiverse acting up again?".format(self.thing_id, self.title))
+            return State.FAILED
+
         # Have we already downloaded some things?
         timestamp_file = os.path.join(self.download_dir, 'timestamp.txt')
         prev_dir = None
         if os.path.exists(self.download_dir):
             if not os.path.exists(timestamp_file):
                 # edge case: old style dir w/out timestamp.
-                logging.warning(
-                    "Old style download dir found for {}".format(self.title))
+                logging.warning("Old style download dir found at {}".format(self.title))
                 prev_count = 0
                 target_dir = "{}_old".format(self.download_dir)
                 while os.path.exists(target_dir):
@@ -324,12 +451,10 @@ class Thing:
                     target_dir = "{}_old_{}".format(self.download_dir, prev_count)
                 os.rename(self.download_dir, target_dir)
             else:
-                prev_dir = "{}_{}".format(self.download_dir, slugify(self.last_time))
+                prev_dir = "{}_{}".format(self.download_dir, slugify(str(self.last_time)))
                 os.rename(self.download_dir, prev_dir)
 
         # Get the list of files to download
-        soup = BeautifulSoup(self.text, features='lxml')
-        file_links = soup.find_all('a', {'class': 'file-download'})
 
         new_file_links = []
         old_file_links = []
@@ -337,41 +462,45 @@ class Thing:
-        new_last_time = None
 
         if not self.last_time:
             # If we don't have anything to copy from, then it is all new.
-            new_file_links = file_links
-            try:
-                new_last_time = file_links[0].find_all('time')[0]['datetime']
-            except:
-                import code
-                code.interact(local=dict(globals(), **locals()))
-
-            for file_link in file_links:
-                timestamp = file_link.find_all('time')[0]['datetime']
-                logging.debug("Found file {} from {}".format(
-                    file_link["title"], timestamp))
-                if timestamp > new_last_time:
-                    new_last_time = timestamp
+            logging.debug("No last time, downloading all files")
+            new_file_links = self._file_links
+            new_last_time = new_file_links[0].last_update
+
+            for file_link in new_file_links:
+                new_last_time = max(new_last_time, file_link.last_update)
+            logging.debug("New timestamp will be {}".format(new_last_time))
         else:
-            for file_link in file_links:
-                timestamp = file_link.find_all('time')[0]['datetime']
-                logging.debug("Checking {} (updated {})".format(
-                    file_link["title"], timestamp))
-                if timestamp > self.last_time:
+            new_last_time = self.last_time
+            for file_link in self._file_links:
+                if file_link.last_update > self.last_time:
                     new_file_links.append(file_link)
+                    new_last_time = max(new_last_time, file_link.last_update)
                 else:
                     old_file_links.append(file_link)
-                if not new_last_time or timestamp > new_last_time:
-                    new_last_time = timestamp
 
         logging.debug("new timestamp {}".format(new_last_time))
 
         # OK. Time to get to work.
         logging.debug("Generating download_dir")
         os.mkdir(self.download_dir)
+        filelist_file = os.path.join(self.download_dir, "filelist.txt")
+        with open(filelist_file, 'w', encoding="utf-8") as fl_handle:
+            for fl in self._file_links:
+                base_link = fl.link
+                try:
+                    fl.link = requests.get(fl.link, allow_redirects=False).headers['location']
+                except Exception:
+                    # Sometimes Thingiverse just gives us the direct link the first time. Not sure why.
+                    pass
+
+                fl_handle.write("{},{},{},{}\n".format(fl.link, fl.name, fl.last_update, base_link))
+
+
         # First grab the cached files (if any)
         logging.info("Copying {} unchanged files.".format(len(old_file_links)))
         for file_link in old_file_links:
-            old_file = os.path.join(prev_dir, file_link["title"])
-            new_file = os.path.join(self.download_dir, file_link["title"])
+            old_file = os.path.join(prev_dir, file_link.name)
+            new_file = truncate_name(os.path.join(self.download_dir, file_link.name))
             try:
                 logging.debug("Copying {} to {}".format(old_file, new_file))
                 copyfile(old_file, new_file)
@@ -381,51 +510,40 @@ class Thing:
                 new_file_links.append(file_link)
 
         # Now download the new ones
-        files = [("{}{}".format(URL_BASE, x['href']), x["title"])
-                 for x in new_file_links]
         logging.info("Downloading {} new files of {}".format(
-            len(new_file_links), len(file_links)))
+            len(new_file_links), len(self._file_links)))
         try:
-            for url, name in files:
-                file_name = os.path.join(self.download_dir, name)
+            for file_link in new_file_links:
+                file_name = truncate_name(os.path.join(self.download_dir, file_link.name))
                 logging.debug("Downloading {} from {} to {}".format(
-                    name, url, file_name))
-                data_req = requests.get(url)
+                    file_link.name, file_link.link, file_name))
+                data_req = requests.get(file_link.link)
                 with open(file_name, 'wb') as handle:
                     handle.write(data_req.content)
         except Exception as exception:
-            logging.error("Failed to download {} - {}".format(name, exception))
-            os.rename(self.download_dir, "{}_failed".format(self.download_dir))
+            logging.error("Failed to download {} - {}".format(file_link.name, exception))
+            fail_dir(self.download_dir)
            return State.FAILED
 
-        # People like images
+
+        # People like images. But this doesn't work yet.
         image_dir = os.path.join(self.download_dir, 'images')
-        imagelinks = soup.find_all('span', {'class': 'gallery-slider'})[0] \
-                         .find_all('div', {'class': 'gallery-photo'})
-        logging.info("Downloading {} images.".format(len(imagelinks)))
+        logging.info("Downloading {} images.".format(len(self._image_links)))
         try:
             os.mkdir(image_dir)
-            for imagelink in imagelinks:
-                url = next(filter(None, [imagelink[x] for x in ['data-full',
-                                                                'data-large',
-                                                                'data-medium',
-                                                                'data-thumb']]), None)
-                if not url:
-                    logging.warning(
-                        "Unable to find any urls for {}".format(imagelink))
-                    continue
-
-                filename = os.path.basename(url)
+            for imagelink in self._image_links:
+                filename = os.path.basename(imagelink)
                 if filename.endswith('stl'):
                     filename = "{}.png".format(filename)
-                image_req = requests.get(url)
-                with open(os.path.join(image_dir, filename), 'wb') as handle:
+                image_req = requests.get(imagelink)
+                with open(truncate_name(os.path.join(image_dir, filename)), 'wb') as handle:
                     handle.write(image_req.content)
         except Exception as exception:
             print("Failed to download {} - {}".format(filename, exception))
-            os.rename(self.download_dir, "{}_failed".format(self.download_dir))
+            fail_dir(self.download_dir)
             return State.FAILED
 
+        """
         # instructions are good too.
         logging.info("Downloading readme")
         try:
@@ -438,25 +556,23 @@ class Thing:
         except IOError as exception:
             logging.warning("Failed to write readme! {}".format(exception))
+        """
 
         # Best get some licenses
         logging.info("Downloading license")
         try:
-            license_txt = soup.find('div', {'class': 'license-text'}).text
-            if license_txt:
-                with open(os.path.join(self.download_dir, 'license.txt'), 'w') as license_handle:
-                    license_handle.write("{}\n".format(license_txt))
-        except AttributeError as exception:
-            logging.warning("No license? {}".format(exception))
+            if self._license:
+                with open(truncate_name(os.path.join(self.download_dir, 'license.txt')), 'w', encoding="utf-8") as license_handle:
+                    license_handle.write("{}\n".format(self._license))
         except IOError as exception:
             logging.warning("Failed to write license! {}".format(exception))
 
         try:
             # Now write the timestamp
-            with open(timestamp_file, 'w') as timestamp_handle:
-                timestamp_handle.write(new_last_time)
+            with open(timestamp_file, 'w', encoding="utf-8") as timestamp_handle:
+                timestamp_handle.write(str(new_last_time))
         except Exception as exception:
             print("Failed to write timestamp file - {}".format(exception))
-            os.rename(self.download_dir, "{}_failed".format(self.download_dir))
+            fail_dir(self.download_dir)
             return State.FAILED
         self._needs_download = False
         logging.debug("Download of {} finished".format(self.title))
@@ -573,6 +689,8 @@ def main():
     for downloader in downloaders:
         thing_queue.put(None)
 
+atexit.register(BROWSER.quit)
+
 if __name__ == "__main__":
     multiprocessing.freeze_support()
     main()