X-Git-Url: https://git.hcoop.net/clinton/thingy_grabber.git/blobdiff_plain/3b497b1a0cef727cb43b1d0ccac8ab2e111a9cd2..ae598d736c1b6090fed7075ba337057f43b9bece:/thingy_grabber.py

diff --git a/thingy_grabber.py b/thingy_grabber.py
index 9c84e41..5c018bd 100755
--- a/thingy_grabber.py
+++ b/thingy_grabber.py
@@ -10,8 +10,22 @@ import argparse
 import unicodedata
 import requests
 import logging
+import multiprocessing
+import enum
+import datetime
 from shutil import copyfile
 from bs4 import BeautifulSoup
+from dataclasses import dataclass
+import selenium
+from selenium import webdriver
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.firefox.options import Options
+import atexit
+import py7zr
+
+SEVENZIP_FILTERS = [{'id': py7zr.FILTER_LZMA2}]

 URL_BASE = "https://www.thingiverse.com"
 URL_COLLECTION = URL_BASE + "/ajax/thingcollection/list_collected_things"
@@ -24,7 +38,85 @@ LAST_PAGE_REGEX = re.compile(r'"last_page":(\d*),')
 PER_PAGE_REGEX = re.compile(r'"per_page":(\d*),')
 NO_WHITESPACE_REGEX = re.compile(r'[-\s]+')

-VERSION = "0.5.1"
+DOWNLOADER_COUNT = 1
+RETRY_COUNT = 3
+
+MAX_PATH_LENGTH = 250
+
+VERSION = "0.9.0"
+
+
+#BROWSER = webdriver.PhantomJS('./phantomjs')
+options = Options()
+options.add_argument("--headless")
+BROWSER = webdriver.Firefox(options=options)
+
+BROWSER.set_window_size(1980, 1080)
+
+
+@dataclass
+class FileLink:
+    name: str
+    last_update: datetime.datetime
+    link: str
+
+class FileLinks:
+    def __init__(self, initial_links=[]):
+        self.links = []
+        self.last_update = None
+        for link in initial_links:
+            self.append(link)
+
+    def __iter__(self):
+        return iter(self.links)
+
+    def __getitem__(self, item):
+        return self.links[item]
+
+    def __len__(self):
+        return len(self.links)
+
+    def append(self, link):
+        try:
+            self.last_update = max(self.last_update, link.last_update)
+        except TypeError:
+            self.last_update = link.last_update
+        self.links.append(link)
+
+
+class State(enum.Enum):
+    OK = enum.auto()
+    FAILED = enum.auto()
+    ALREADY_DOWNLOADED = enum.auto()
+
+
+def fail_dir(dir_name):
+    """ When a download has failed, move it sideways.
+    """
+    target_dir = "{}_failed".format(dir_name)
+    inc = 0
+    while os.path.exists(target_dir):
+        target_dir = "{}_failed_{}".format(dir_name, inc)
+        inc += 1
+    os.rename(dir_name, target_dir)
+
+
+def truncate_name(file_name):
+    """ Ensure the file path is not too long for, well, Windows basically.
+    """
+    path = os.path.abspath(file_name)
+    if len(path) <= MAX_PATH_LENGTH:
+        return path
+    to_cut = len(path) - (MAX_PATH_LENGTH - 3)  # leave room for a "_<n>" suffix
+    base, extension = os.path.splitext(path)
+    base = base[:len(base) - to_cut]
+    inc = 0
+    new_path = "{}_{}{}".format(base, inc, extension)
+    while os.path.exists(new_path):
+        inc += 1
+        new_path = "{}_{}{}".format(base, inc, extension)
+    return new_path
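A quick sanity check of the path-capping behaviour, as a minimal sketch: it assumes truncate_name and MAX_PATH_LENGTH from this script are in scope, and the file name is invented.

    import os

    long_name = "x" * 300 + ".stl"          # deliberately over MAX_PATH_LENGTH
    short = truncate_name(long_name)
    assert len(short) <= MAX_PATH_LENGTH    # capped, with a "_<n>" de-duplication suffix
    assert short.endswith(".stl")           # the extension survives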
""" - value = unicodedata.normalize('NFKD', value).encode( - 'ascii', 'ignore').decode() - value = str(re.sub(r'[^\w\s-]', '', value).strip()) - value = str(NO_WHITESPACE_REGEX.sub('-', value)) - #value = str(re.sub(r'[-\s]+', '-', value)) + value = unicodedata.normalize('NFKC', value).lower().strip() + value = re.sub(r'[\\/<>:\?\*\|"]', '', value) + value = re.sub(r'\.*$', '', value) return value +class PageChecker(object): + def __init__(self): + self.log = [] + self.title = None + self.file_count = None + self.files = None + self.images = None + self.license = None + + + def __call__(self, _): + try: + self.log.append("call") + if self.title is None: + # first find the name + name = EC._find_element(BROWSER, (By.CSS_SELECTOR, "[class^=ThingPage__modelName]")) + if name is None: + return False + self.title = name.text + + if self.file_count is None: + # OK. Do we know how many files we have to download? + metrics = EC._find_elements(BROWSER, (By.CSS_SELECTOR, "[class^=MetricButton]")) + self.log.append("got some metrics: {}".format(len(metrics))) + cur_count = int([x.text.split("\n")[0] for x in metrics if x.text.endswith("\nThing Files")][0]) + self.log.append(cur_count) + if cur_count == 0: + return False + self.file_count = cur_count + + self.log.append("looking for {} files".format(self.file_count)) + fileRows = EC._find_elements(BROWSER, (By.CSS_SELECTOR, "[class^=ThingFile__fileRow]")) + self.log.append("found {} files".format(len(fileRows))) + if len(fileRows) < self.file_count: + return False + + self.log.append("Looking for images") + # By this point _should_ have loaded all the images + self.images = EC._find_elements(BROWSER, (By.CSS_SELECTOR, "[class^=thumb]")) + self.license = EC._find_element(BROWSER, (By.CSS_SELECTOR, "[class^=License__licenseText]")).text + self.log.append("found {} images".format(len(self.images))) + self.files = fileRows + return True + except Exception: + return False + + + + +class Downloader(multiprocessing.Process): + """ + Class to handle downloading the things we have found to get. + """ + + def __init__(self, thing_queue, download_directory, compress): + multiprocessing.Process.__init__(self) + # TODO: add parameters + self.thing_queue = thing_queue + self.download_directory = download_directory + self.compress = compress + + def run(self): + """ actual download loop. + """ + while True: + thing_id = self.thing_queue.get() + if thing_id is None: + logging.info("Shutting download queue") + self.thing_queue.task_done() + break + logging.info("Handling id {}".format(thing_id)) + Thing(thing_id).download(self.download_directory, self.compress) + self.thing_queue.task_done() + return + + + + class Grouping: """ Holds details of a group of things for download @@ -50,12 +217,15 @@ class Grouping: - use Collection or Designs instead. """ - def __init__(self): + def __init__(self, quick, compress): self.things = [] self.total = 0 self.req_id = None self.last_page = 0 self.per_page = None + # Should we stop downloading when we hit a known datestamp? + self.quick = quick + self.compress = compress # These should be set by child classes. 
+
+
+
 
 class Grouping:
     """ Holds details of a group of things for download
@@ -50,12 +218,15 @@ class Grouping:
         - use Collection or Designs instead.
     """
 
-    def __init__(self):
+    def __init__(self, quick, compress):
         self.things = []
         self.total = 0
         self.req_id = None
         self.last_page = 0
         self.per_page = None
+        # Should we stop downloading when we hit a known datestamp?
+        self.quick = quick
+        self.compress = compress
         # These should be set by child classes.
         self.url = None
         self.download_dir = None
@@ -124,15 +295,21 @@ class Grouping:
                          .format(self.download_dir))
         logging.info("Downloading {} thing(s).".format(self.total))
         for idx, thing in enumerate(self.things):
-            logging.info("Downloading thing {}".format(idx))
-            Thing(thing).download(self.download_dir)
+            logging.info("Downloading thing {} - {}".format(idx, thing))
+            RC = Thing(thing).download(self.download_dir, self.compress)
+            if self.quick and RC == State.ALREADY_DOWNLOADED:
+                logging.info("Caught up, stopping.")
+                return
+
+
 
 class Collection(Grouping):
     """ Holds details of a collection. """
 
-    def __init__(self, user, name, directory):
-        Grouping.__init__(self)
+    def __init__(self, user, name, directory, quick, compress):
+        Grouping.__init__(self, quick, compress)
         self.user = user
         self.name = name
         self.url = "{}/{}/collections/{}".format(
@@ -145,8 +322,8 @@ class Collection(Grouping):
 class Designs(Grouping):
     """ Holds details of all of a users' designs. """
 
-    def __init__(self, user, directory):
-        Grouping.__init__(self)
+    def __init__(self, user, directory, quick, compress):
+        Grouping.__init__(self, quick, compress)
         self.user = user
         self.url = "{}/{}/designs".format(URL_BASE, self.user)
         self.download_dir = os.path.join(
@@ -165,6 +342,8 @@ class Thing:
         self.text = None
         self.title = None
         self.download_dir = None
+        self.time_stamp = None
+        self._file_links = FileLinks()
 
     def _parse(self, base_dir):
         """ Work out what, if anything needs to be done. """
@@ -173,38 +352,58 @@ class Thing:
 
         url = "{}/thing:{}/files".format(URL_BASE, self.thing_id)
         try:
-            req = requests.get(url)
+            BROWSER.get(url)
+            wait = WebDriverWait(BROWSER, 60)
+            pc = PageChecker()
+            wait.until(pc)
-        except requests.exceptions.ConnectionError as error:
-            logging.error("Unable to connect for thing {}: {}".format(self.thing_id, error))
-            return
+        except selenium.common.exceptions.TimeoutException:
+            logging.error(pc.log)
+            logging.error("Timeout trying to parse thing {}".format(self.thing_id))
+            return
+        except selenium.common.exceptions.WebDriverException as error:
+            logging.error("Unable to connect for thing {}: {}".format(
+                self.thing_id, error))
+            return
 
-        self.text = req.text
-        soup = BeautifulSoup(self.text, features='lxml')
-        #import code
-        #code.interact(local=dict(globals(), **locals()))
-        try:
-            self.title = slugify(soup.find_all('h1')[0].text.strip())
-        except IndexError:
-            logging.warning("No title found for thing {}".format(self.thing_id))
-            self.title = self.thing_id
+        self.title = pc.title
+        if not pc.files:
+            logging.error("No files found for thing {} - probably thingiverse being broken, try again later".format(self.thing_id))
+            return
+        for link in pc.files:
+            logging.debug("Parsing link: {}".format(link.text))
+            link_link = link.find_element_by_xpath(".//a").get_attribute("href")
+            if link_link.endswith("/zip"):
+                # bulk link.
+                continue
+            try:
+                link_title, link_details, _ = link.text.split("\n")
+            except ValueError:
+                # If it is a filetype that doesn't generate a picture, then we get an extra field at the start.
+                _, link_title, link_details, _ = link.text.split("\n")
+
+            # link_details will be something like '461 kb | Updated 06-11-2019 | 373 Downloads'
+            # need to convert from M D Y to Y M D
+            link_date = [int(x) for x in link_details.split("|")[1].split()[-1].split("-")]
+            try:
+                self._file_links.append(FileLink(link_title, datetime.datetime(link_date[2], link_date[0], link_date[1]), link_link))
+            except ValueError:
+                logging.error(link_date)
 
-        if req.status_code == 404:
-            logging.warning("404 for thing {} - DMCA or invalid number?".format(self.thing_id))
-            return
+        self._image_links = [x.find_element_by_xpath(".//img").get_attribute("src") for x in pc.images]
+        self._license = pc.license
+        self.pc = pc
 
-        if req.status_code > 299:
-            logging.warning("bad status code {} for thing {} - try again later?".format(req.status_code, self.thing_id))
-            return
 
-        self.old_download_dir = os.path.join(base_dir, self.title)
-        self.download_dir = os.path.join(base_dir, " - ".format(self.thing_id, self.title))
+        self.old_download_dir = os.path.join(base_dir, slugify(self.title))
+        self.download_dir = os.path.join(base_dir, "{} - {}".format(self.thing_id, slugify(self.title)))
 
         logging.debug("Parsing {} ({})".format(self.thing_id, self.title))
 
         if not os.path.exists(self.download_dir):
+            logging.info("Looking for old dir at {}".format(self.old_download_dir))
             if os.path.exists(self.old_download_dir):
-                logging.info("Found previous style download directory. Moving it")
-                copyfile(self.old_download_dir, self.download_dir)
+                logging.warning("Found previous style download directory. Moving it from {} to {}".format(self.old_download_dir, self.download_dir))
+                os.rename(self.old_download_dir, self.download_dir)
             else:
                 # Not yet downloaded
                 self._parsed = True
@@ -220,44 +420,65 @@ class Thing:
 
         try:
             with open(timestamp_file, 'r') as timestamp_handle:
-                self.last_time = timestamp_handle.readlines()[0]
+                # the .split(' ')[0] strips the time-of-day part from old-style timestamps
+                last_bits = [int(x) for x in timestamp_handle.readlines()[0].split(' ')[0].split("-")]
+                logging.warning(last_bits)
+                if last_bits[0] == 0:
+                    last_bits[0] = 1
+                if last_bits[1] == 0:
+                    last_bits[1] = 1
+                if last_bits[2] == 0:
+                    last_bits[2] = 1980
+                try:
+                    self.last_time = datetime.datetime(last_bits[0], last_bits[1], last_bits[2])
+                except ValueError:
+                    # This one appears to be M D Y
+                    self.last_time = datetime.datetime(last_bits[2], last_bits[0], last_bits[1])
+            logging.info("last downloaded version: {}".format(self.last_time))
         except FileNotFoundError:
             # Not run on this thing before.
             logging.info(
                 "Old-style download directory found. Assuming update required.")
             self.last_time = None
+            self._needs_download = True
             self._parsed = True
             return
 
         # OK, so we have a timestamp, lets see if there is anything new to get
-        file_links = soup.find_all('a', {'class': 'file-download'})
-        for file_link in file_links:
-            timestamp = file_link.find_all('time')[0]['datetime']
-            logging.debug("Checking {} (updated {})".format(
-                file_link["title"], timestamp))
-            if timestamp > self.last_time:
+        try:
+            if self._file_links.last_update > self.last_time:
                 logging.info(
-                    "Found new/updated file {}".format(file_link["title"]))
+                    "Found new/updated files {}".format(self._file_links.last_update))
                 self._needs_download = True
                 self._parsed = True
                 return
+        except TypeError:
+            logging.warning("No files found for {}.".format(self.thing_id))
 
+        # Got here, so nope, no new files.
         self._needs_download = False
         self._parsed = True
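Two date formats are juggled above: Thingiverse file rows show M-D-Y ("Updated 06-11-2019"), while timestamp.txt stores str(datetime), which leads with the year. A sketch of the site-side parse using strptime, on the sample string from the comment above:

    import datetime

    link_details = "461 kb | Updated 06-11-2019 | 373 Downloads"
    raw = link_details.split("|")[1].split()[-1]        # "06-11-2019"
    when = datetime.datetime.strptime(raw, "%m-%d-%Y")  # month first on the site
    assert when == datetime.datetime(2019, 6, 11)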
 
-    def download(self, base_dir):
-        """ Download all files for a given thing. """
+    def download(self, base_dir, compress):
+        """ Download all files for a given thing.
+            Returns a State: OK or ALREADY_DOWNLOADED if the thing is now on
+            disk (whether or not this call fetched it), FAILED otherwise.
+        """
         if not self._parsed:
             self._parse(base_dir)
 
         if not self._parsed:
-            logging.error("Unable to parse {} - aborting download".format(self.thing_id))
-            return
+            logging.error(
                "Unable to parse {} - aborting download".format(self.thing_id))
+            return State.FAILED
 
         if not self._needs_download:
-            print("{} already downloaded - skipping.".format(self.title))
-            return
+            print("{} - {} already downloaded - skipping.".format(self.thing_id, self.title))
+            return State.ALREADY_DOWNLOADED
+
+        if not self._file_links:
+            print("{} - {} appears to have no files. Thingiverse acting up again?".format(self.thing_id, self.title))
+            return State.FAILED
 
         # Have we already downloaded some things?
         timestamp_file = os.path.join(self.download_dir, 'timestamp.txt')
@@ -265,8 +486,7 @@ class Thing:
         if os.path.exists(self.download_dir):
             if not os.path.exists(timestamp_file):
                 # edge case: old style dir w/out timestamp.
-                logging.warning(
-                    "Old style download dir found for {}".format(self.title))
+                logging.warning("Old style download dir found at {}".format(self.title))
                 prev_count = 0
                 target_dir = "{}_old".format(self.download_dir)
                 while os.path.exists(target_dir):
@@ -274,54 +494,56 @@ class Thing:
                     target_dir = "{}_old_{}".format(self.download_dir, prev_count)
                 os.rename(self.download_dir, target_dir)
             else:
-                prev_dir = "{}_{}".format(self.download_dir, self.last_time)
+                prev_dir = "{}_{}".format(self.download_dir, slugify(str(self.last_time)))
                 os.rename(self.download_dir, prev_dir)
 
         # Get the list of files to download
-        soup = BeautifulSoup(self.text, features='lxml')
-        file_links = soup.find_all('a', {'class': 'file-download'})
 
         new_file_links = []
         old_file_links = []
-        new_last_time = None
+        self.time_stamp = None
 
         if not self.last_time:
             # If we don't have anything to copy from, then it is all new.
-            new_file_links = file_links
-            try:
-                new_last_time = file_links[0].find_all('time')[0]['datetime']
-            except:
-                import code
-                code.interact(local=dict(globals(), **locals()))
-
-            for file_link in file_links:
-                timestamp = file_link.find_all('time')[0]['datetime']
-                logging.debug("Found file {} from {}".format(
-                    file_link["title"], timestamp))
-                if timestamp > new_last_time:
-                    new_last_time = timestamp
+            logging.debug("No last time, downloading all files")
+            new_file_links = self._file_links
+            self.time_stamp = new_file_links[0].last_update
+
+            for file_link in new_file_links:
+                self.time_stamp = max(self.time_stamp, file_link.last_update)
+            logging.debug("New timestamp will be {}".format(self.time_stamp))
         else:
-            for file_link in file_links:
-                timestamp = file_link.find_all('time')[0]['datetime']
-                logging.debug("Checking {} (updated {})".format(
-                    file_link["title"], timestamp))
-                if timestamp > self.last_time:
+            self.time_stamp = self.last_time
+            for file_link in self._file_links:
+                if file_link.last_update > self.last_time:
                     new_file_links.append(file_link)
+                    self.time_stamp = max(self.time_stamp, file_link.last_update)
                 else:
                     old_file_links.append(file_link)
-                if not new_last_time or timestamp > new_last_time:
-                    new_last_time = timestamp
 
-        logging.debug("new timestamp {}".format(new_last_time))
+        logging.debug("new timestamp {}".format(self.time_stamp))
 
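The incremental rule above boils down to: fetch the links newer than the stored timestamp, copy the rest across from the previous snapshot, and remember the newest date seen. A minimal sketch with invented FileLink values, assuming the dataclass defined earlier:

    import datetime

    links = [
        FileLink("body.stl", datetime.datetime(2019, 6, 11), "https://example.com/1"),
        FileLink("lid.stl", datetime.datetime(2020, 1, 2), "https://example.com/2"),
    ]
    last_time = datetime.datetime(2019, 12, 31)

    to_download = [l for l in links if l.last_update > last_time]   # just lid.stl
    to_copy = [l for l in links if l.last_update <= last_time]      # body.stl, from prev_dir
    time_stamp = max([last_time] + [l.last_update for l in links])  # written to timestamp.txt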
logging.debug("Generating download_dir") os.mkdir(self.download_dir) + filelist_file = os.path.join(self.download_dir, "filelist.txt") + with open(filelist_file, 'w', encoding="utf-8") as fl_handle: + for fl in self._file_links: + base_link = fl.link + try: + fl.link=requests.get(fl.link, allow_redirects=False).headers['location'] + except Exception: + # Sometimes Thingiverse just gives us the direct link the first time. Not sure why. + pass + + fl_handle.write("{},{},{}, {}\n".format(fl.link, fl.name, fl.last_update, base_link)) + + # First grab the cached files (if any) logging.info("Copying {} unchanged files.".format(len(old_file_links))) for file_link in old_file_links: - old_file = os.path.join(prev_dir, file_link["title"]) - new_file = os.path.join(self.download_dir, file_link["title"]) + old_file = os.path.join(prev_dir, file_link.name) + new_file = truncate_name(os.path.join(self.download_dir, file_link.name)) try: logging.debug("Copying {} to {}".format(old_file, new_file)) copyfile(old_file, new_file) @@ -331,108 +550,119 @@ class Thing: new_file_links.append(file_link) # Now download the new ones - files = [("{}{}".format(URL_BASE, x['href']), x["title"]) - for x in new_file_links] logging.info("Downloading {} new files of {}".format( - len(new_file_links), len(file_links))) + len(new_file_links), len(self._file_links))) try: - for url, name in files: - file_name = os.path.join(self.download_dir, name) + for file_link in new_file_links: + file_name = truncate_name(os.path.join(self.download_dir, file_link.name)) logging.debug("Downloading {} from {} to {}".format( - name, url, file_name)) - data_req = requests.get(url) + file_link.name, file_link.link, file_name)) + data_req = requests.get(file_link.link) with open(file_name, 'wb') as handle: handle.write(data_req.content) except Exception as exception: - logging.error("Failed to download {} - {}".format(name, exception)) - os.rename(self.download_dir, "{}_failed".format(self.download_dir)) - return + logging.error("Failed to download {} - {}".format(file_link.name, exception)) + fail_dir(self.download_dir) + return State.FAILED - # People like images + + # People like images. But this doesn't work yet. image_dir = os.path.join(self.download_dir, 'images') - imagelinks = soup.find_all('span', {'class': 'gallery-slider'})[0] \ - .find_all('div', {'class': 'gallery-photo'}) - logging.info("Downloading {} images.".format(len(imagelinks))) + logging.info("Downloading {} images.".format(len(self._image_links))) try: os.mkdir(image_dir) - for imagelink in imagelinks: - url = next(filter(None,[imagelink[x] for x in ['data-full', - 'data-large', - 'data-medium', - 'data-thumb']]), None) - if not url: - logging.warning("Unable to find any urls for {}".format(imagelink)) - continue - - filename = os.path.basename(url) + for imagelink in self._image_links: + filename = os.path.basename(imagelink) if filename.endswith('stl'): filename = "{}.png".format(filename) - image_req = requests.get(url) - with open(os.path.join(image_dir, filename), 'wb') as handle: + image_req = requests.get(imagelink) + with open(truncate_name(os.path.join(image_dir, filename)), 'wb') as handle: handle.write(image_req.content) except Exception as exception: print("Failed to download {} - {}".format(filename, exception)) - os.rename(self.download_dir, "{}_failed".format(self.download_dir)) - return + fail_dir(self.download_dir) + return State.FAILED + """ # instructions are good too. 
logging.info("Downloading readme") try: - readme_txt = soup.find('meta', property='og:description')['content'] - with open(os.path.join(self.download_dir,'readme.txt'), 'w') as readme_handle: + readme_txt = soup.find('meta', property='og:description')[ + 'content'] + with open(os.path.join(self.download_dir, 'readme.txt'), 'w') as readme_handle: readme_handle.write("{}\n".format(readme_txt)) except (TypeError, KeyError) as exception: logging.warning("No readme? {}".format(exception)) except IOError as exception: logging.warning("Failed to write readme! {}".format(exception)) + """ # Best get some licenses logging.info("Downloading license") try: - license_txt = soup.find('div',{'class':'license-text'}).text - if license_txt: - with open(os.path.join(self.download_dir,'license.txt'), 'w') as license_handle: - license_handle.write("{}\n".format(license_txt)) - except AttributeError as exception: - logging.warning("No license? {}".format(exception)) + if self._license: + with open(truncate_name(os.path.join(self.download_dir, 'license.txt')), 'w', encoding="utf-8") as license_handle: + license_handle.write("{}\n".format(self._license)) except IOError as exception: logging.warning("Failed to write license! {}".format(exception)) - try: # Now write the timestamp - with open(timestamp_file, 'w') as timestamp_handle: - timestamp_handle.write(new_last_time) + with open(timestamp_file, 'w', encoding="utf-8") as timestamp_handle: + timestamp_handle.write(self.time_stamp.__str__()) except Exception as exception: print("Failed to write timestamp file - {}".format(exception)) - os.rename(self.download_dir, "{}_failed".format(self.download_dir)) - return + fail_dir(self.download_dir) + return State.FAILED self._needs_download = False logging.debug("Download of {} finished".format(self.title)) + if not compress: + return State.OK + + + thing_dir = "{} - {} - {}".format(self.thing_id, + slugify(self.title), + self.time_stamp) + file_name = os.path.join(base_dir, + "{}.7z".format(thing_dir)) + logging.debug("Compressing {} to {}".format( + self.title, + file_name)) + #with libarchive.file_writer(filename, 'lzma', '7z') as archive: + with py7zr.SevenZipFile(file_name, 'w', filters=SEVENZIP_FILTERS) as archive: + #with py7zr.SevenZipFile(file_name, 'w' ) as archive: + archive.writeall(self.download_dir, thing_dir) + logging.debug("Compression of {} finished.".format(self.title)) + return State.OK -def do_batch(batch_file, download_dir): + + +def do_batch(batch_file, download_dir, quick, compress): """ Read a file in line by line, parsing each as a set of calls to this script.""" with open(batch_file) as handle: for line in handle: line = line.strip() + if not line: + # Skip empty lines + continue logging.info("Handling instruction {}".format(line)) command_arr = line.split() if command_arr[0] == "thing": logging.debug( "Handling batch thing instruction: {}".format(line)) - Thing(command_arr[1]).download(download_dir) + Thing(command_arr[1]).download(download_dir, compress) continue if command_arr[0] == "collection": logging.debug( "Handling batch collection instruction: {}".format(line)) Collection(command_arr[1], command_arr[2], - download_dir).download() + download_dir, quick, compress).download() continue if command_arr[0] == "user": logging.debug( "Handling batch collection instruction: {}".format(line)) - Designs(command_arr[1], download_dir).download() + Designs(command_arr[1], download_dir, quick, compress).download() continue logging.warning("Unable to parse current instruction. 
Skipping.") @@ -446,6 +676,12 @@ def main(): help="Target directory to download into") parser.add_argument("-f", "--log-file", help="Place to log debug information to") + parser.add_argument("-q", "--quick", action="store_true", + help="Assume date ordering on posts") + parser.add_argument("-c", "--compress", action="store_true", + help="Compress files") + + subparsers = parser.add_subparsers( help="Type of thing to download", dest="subcommand") collection_parser = subparsers.add_parser( @@ -456,10 +692,12 @@ def main(): "collections", nargs="+", help="Space seperated list of the name(s) of collection to get") thing_parser = subparsers.add_parser( 'thing', help="Download a single thing.") - thing_parser.add_argument("things", nargs="*", help="Space seperated list of thing ID(s) to download") + thing_parser.add_argument( + "things", nargs="*", help="Space seperated list of thing ID(s) to download") user_parser = subparsers.add_parser( "user", help="Download all things by one or more users") - user_parser.add_argument("users", nargs="+", help="A space seperated list of the user(s) to get the designs of") + user_parser.add_argument( + "users", nargs="+", help="A space seperated list of the user(s) to get the designs of") batch_parser = subparsers.add_parser( "batch", help="Perform multiple actions written in a text file") batch_parser.add_argument( @@ -486,20 +724,35 @@ def main(): file_handler.setFormatter(formatter) logger.addHandler(file_handler) + + # Start downloader + thing_queue = multiprocessing.JoinableQueue() + logging.debug("starting {} downloader(s)".format(DOWNLOADER_COUNT)) + downloaders = [Downloader(thing_queue, args.directory, args.compress) for _ in range(DOWNLOADER_COUNT)] + for downloader in downloaders: + downloader.start() + + if args.subcommand.startswith("collection"): for collection in args.collections: - Collection(args.owner, collection, args.directory).download() + Collection(args.owner, collection, args.directory, args.quick, args.compress).download() if args.subcommand == "thing": for thing in args.things: - Thing(thing).download(args.directory) + thing_queue.put(thing) if args.subcommand == "user": for user in args.users: - Designs(user, args.directory).download() + Designs(user, args.directory, args.quick, args.compress).download() if args.subcommand == "version": print("thingy_grabber.py version {}".format(VERSION)) if args.subcommand == "batch": - do_batch(args.batch_file, args.directory) + do_batch(args.batch_file, args.directory, args.quick, args.compress) + + # Stop the downloader processes + for downloader in downloaders: + thing_queue.put(None) +atexit.register(BROWSER.quit) -if __name__ == "__main__": +if __name__ == "__main__": + multiprocessing.freeze_support() main()