#!/usr/bin/env python3
"""
Thingiverse bulk downloader
"""

import re
import sys
import os
import argparse
import unicodedata
import requests
import logging
import multiprocessing
import enum
from shutil import copyfile
from bs4 import BeautifulSoup

URL_BASE = "https://www.thingiverse.com"
URL_COLLECTION = URL_BASE + "/ajax/thingcollection/list_collected_things"
USER_COLLECTION = URL_BASE + "/ajax/user/designs"

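# These regexes scrape paging metadata from the JSON blob embedded in a
# grouping page: the grouping id, total item count, items per page, and
# the number of the last page.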
ID_REGEX = re.compile(r'"id":(\d*),')
TOTAL_REGEX = re.compile(r'"total":(\d*),')
LAST_PAGE_REGEX = re.compile(r'"last_page":(\d*),')
# This appears to be fixed at 12, but if it changes it will break the
# paging logic below.
PER_PAGE_REGEX = re.compile(r'"per_page":(\d*),')
NO_WHITESPACE_REGEX = re.compile(r'[-\s]+')

DOWNLOADER_COUNT = 1
RETRY_COUNT = 3

VERSION = "0.7.0"


class State(enum.Enum):
    OK = enum.auto()
    FAILED = enum.auto()
    ALREADY_DOWNLOADED = enum.auto()

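# Thing.download() returns one of these states; Grouping.download() compares
# the result against ALREADY_DOWNLOADED to stop early in --quick mode.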

def strip_ws(value):
    """ Collapse runs of whitespace and hyphens into single hyphens. """
    return str(NO_WHITESPACE_REGEX.sub('-', value))


def slugify(value):
    """
    Normalises a string: strips non-ASCII and other non-alphanumeric
    characters and converts runs of whitespace to single hyphens.
    """
    value = unicodedata.normalize('NFKD', value).encode(
        'ascii', 'ignore').decode()
    value = str(re.sub(r'[^\w\s-]', '', value).strip())
    value = str(NO_WHITESPACE_REGEX.sub('-', value))
    return value

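# For example (hypothetical input), slugify("Mé Design! (v2)") returns
# "Me-Design-v2": the accent is dropped, punctuation is stripped, and the
# spaces collapse to hyphens. Note that case is preserved.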

class Downloader(multiprocessing.Process):
    """
    Class to handle downloading the things we have found to get.
    """

    def __init__(self, thing_queue, download_directory):
        multiprocessing.Process.__init__(self)
        # TODO: add parameters
        self.thing_queue = thing_queue
        self.download_directory = download_directory

    def run(self):
        """ The actual download loop. """
        while True:
            thing_id = self.thing_queue.get()
            if thing_id is None:
                logging.info("Shutting down downloader")
                self.thing_queue.task_done()
                break
            logging.info("Handling id {}".format(thing_id))
            Thing(thing_id).download(self.download_directory)
            self.thing_queue.task_done()
        return

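# Downloader is a standard multiprocessing worker: main() below starts
# DOWNLOADER_COUNT of them against a shared JoinableQueue, feeds thing ids
# into the queue, and finally enqueues one None sentinel per worker to shut
# them down. A minimal sketch (the thing id is a hypothetical placeholder):
#
#     queue = multiprocessing.JoinableQueue()
#     workers = [Downloader(queue, "downloads")
#                for _ in range(DOWNLOADER_COUNT)]
#     for worker in workers:
#         worker.start()
#     queue.put("1234567")
#     queue.put(None)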

class Grouping:
    """ Holds details of a group of things for download.

    This is effectively (although not actually) an abstract class
    - use Collection or Designs instead.
    """

    def __init__(self, quick):
        self.things = []
        self.total = 0
        self.req_id = None
        self.last_page = 0
        self.per_page = None
        # Should we stop downloading when we hit a known datestamp?
        self.quick = quick
        # These should be set by child classes.
        self.url = None
        self.download_dir = None
        self.collection_url = None

    def _get_small_grouping(self, req):
        """ Handle small groupings """
        soup = BeautifulSoup(req.text, features='lxml')
        links = soup.find_all('a', {'class': 'card-img-holder'})
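        # The hrefs appear to be relative links of the form
        # "/thing:1234567"; keep the numeric id after the colon.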
        self.things = [x['href'].split(':')[1] for x in links]
        self.total = len(self.things)

        return self.things

    def get(self):
        """ Retrieve the things of the grouping. """
        if self.things:
            # We've already done it.
            return self.things

        # Check for initialisation:
        if not self.url:
            logging.error("No URL set - object not initialised properly?")
            raise ValueError("No URL set - object not initialised properly?")

        # Get the internal details of the grouping.
        logging.debug("Querying {}".format(self.url))
        c_req = requests.get(self.url)
        total = TOTAL_REGEX.search(c_req.text)
        if total is None:
            # This is a small (<13 items) grouping. Pull the list from this req.
            return self._get_small_grouping(c_req)
        self.total = total.groups()[0]
        self.req_id = ID_REGEX.search(c_req.text).groups()[0]
        self.last_page = int(LAST_PAGE_REGEX.search(c_req.text).groups()[0])
        self.per_page = PER_PAGE_REGEX.search(c_req.text).groups()[0]
        parameters = {
            'base_url': self.url,
            'page': '1',
            'per_page': '12',
            'id': self.req_id
        }
        for current_page in range(1, self.last_page + 1):
            parameters['page'] = current_page
            req = requests.post(self.collection_url, parameters)
            soup = BeautifulSoup(req.text, features='lxml')
            links = soup.find_all('a', {'class': 'card-img-holder'})
            self.things += [x['href'].split(':')[1] for x in links]

        return self.things

    def download(self):
        """ Downloads all the files in a collection """
        if not self.things:
            self.get()

        if not self.download_dir:
            raise ValueError(
                "No download_dir set - invalidly initialised object?")

        base_dir = os.getcwd()
        try:
            os.mkdir(self.download_dir)
        except FileExistsError:
            logging.info("Target directory {} already exists. Assuming a resume."
                         .format(self.download_dir))
        logging.info("Downloading {} thing(s).".format(self.total))
        for idx, thing in enumerate(self.things):
            logging.info("Downloading thing {}".format(idx))
            return_code = Thing(thing).download(self.download_dir)
            if self.quick and return_code == State.ALREADY_DOWNLOADED:
                logging.info("Caught up, stopping.")
                return


class Collection(Grouping):
    """ Holds details of a collection. """

    def __init__(self, user, name, directory, quick):
        Grouping.__init__(self, quick)
        self.user = user
        self.name = name
        self.url = "{}/{}/collections/{}".format(
            URL_BASE, self.user, strip_ws(self.name))
        self.download_dir = os.path.join(
            directory, "{}-{}".format(slugify(self.user), slugify(self.name)))
        self.collection_url = URL_COLLECTION


class Designs(Grouping):
    """ Holds details of all of a user's designs. """

    def __init__(self, user, directory, quick):
        Grouping.__init__(self, quick)
        self.user = user
        self.url = "{}/{}/designs".format(URL_BASE, self.user)
        self.download_dir = os.path.join(
            directory, "{} designs".format(slugify(self.user)))
        self.collection_url = USER_COLLECTION


class Thing:
    """ An individual design on Thingiverse. """

    def __init__(self, thing_id):
        self.thing_id = thing_id
        self.last_time = None
        self._parsed = False
        self._needs_download = True
        self.text = None
        self.title = None
        self.download_dir = None

    def _parse(self, base_dir):
        """ Work out what, if anything, needs to be done. """
        if self._parsed:
            return

        url = "{}/thing:{}/files".format(URL_BASE, self.thing_id)
        try:
            req = requests.get(url)
        except requests.exceptions.ConnectionError as error:
            logging.error("Unable to connect for thing {}: {}".format(
                self.thing_id, error))
            return

        self.text = req.text
        soup = BeautifulSoup(self.text, features='lxml')
        try:
            self.title = slugify(soup.find_all('h1')[0].text.strip())
        except IndexError:
            logging.warning(
                "No title found for thing {}".format(self.thing_id))
            self.title = self.thing_id

        if req.status_code == 404:
            logging.warning(
                "404 for thing {} - DMCA or invalid number?".format(self.thing_id))
            return

        if req.status_code > 299:
            logging.warning(
                "Bad status code {} for thing {} - try again later?".format(
                    req.status_code, self.thing_id))
            return

        self.old_download_dir = os.path.join(base_dir, self.title)
        self.download_dir = os.path.join(
            base_dir, "{} - {}".format(self.thing_id, self.title))

        logging.debug("Parsing {} ({})".format(self.thing_id, self.title))

        if not os.path.exists(self.download_dir):
            if os.path.exists(self.old_download_dir):
                logging.info("Found previous style download directory. Moving it")
                # copyfile() cannot copy a directory; rename it instead.
                os.rename(self.old_download_dir, self.download_dir)
            else:
                # Not yet downloaded
                self._parsed = True
                return

        timestamp_file = os.path.join(self.download_dir, 'timestamp.txt')
        if not os.path.exists(timestamp_file):
            # Old download from before
            logging.warning(
                "Old-style download directory found. Assuming update required.")
            self._parsed = True
            return

        try:
            with open(timestamp_file, 'r') as timestamp_handle:
                self.last_time = timestamp_handle.readlines()[0]
            logging.info("last downloaded version: {}".format(self.last_time))
        except FileNotFoundError:
            # Timestamp file vanished between the check above and the read.
            logging.info(
                "Missing timestamp file. Assuming update required.")
            self.last_time = None
            self._parsed = True
            return

        # OK, so we have a timestamp; let's see if there is anything new to get.
        file_links = soup.find_all('a', {'class': 'file-download'})
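        # The datetime attributes appear to be ISO-8601 timestamps, so plain
        # string comparison (as used here and in download() below) orders
        # them chronologically.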
        for file_link in file_links:
            timestamp = file_link.find_all('time')[0]['datetime']
            logging.debug("Checking {} (updated {})".format(
                file_link["title"], timestamp))
            if timestamp > self.last_time:
                logging.info(
                    "Found new/updated file {}".format(file_link["title"]))
                self._needs_download = True
                self._parsed = True
                return
        # Got here, so nope, no new files.
        self._needs_download = False
        self._parsed = True

    def download(self, base_dir):
        """ Download all files for a given thing.

        Returns a State: OK if the thing is now downloaded,
        ALREADY_DOWNLOADED if it was already up to date, and FAILED on error.
        """
        if not self._parsed:
            self._parse(base_dir)

        if not self._parsed:
            logging.error(
                "Unable to parse {} - aborting download".format(self.thing_id))
            return State.FAILED

        if not self._needs_download:
            print("{} - {} already downloaded - skipping.".format(
                self.thing_id, self.title))
            return State.ALREADY_DOWNLOADED

        # Have we already downloaded some things?
        timestamp_file = os.path.join(self.download_dir, 'timestamp.txt')
        prev_dir = None
        if os.path.exists(self.download_dir):
            if not os.path.exists(timestamp_file):
                # Edge case: old-style dir without a timestamp.
                logging.warning(
                    "Old style download dir found for {}".format(self.title))
                prev_count = 0
                target_dir = "{}_old".format(self.download_dir)
                while os.path.exists(target_dir):
                    prev_count = prev_count + 1
                    target_dir = "{}_old_{}".format(self.download_dir, prev_count)
                os.rename(self.download_dir, target_dir)
            else:
                prev_dir = "{}_{}".format(self.download_dir, slugify(self.last_time))
                os.rename(self.download_dir, prev_dir)
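        # Any previous snapshot has now been moved aside; prev_dir (when set)
        # is used below to copy over files that have not changed.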

        # Get the list of files to download
        soup = BeautifulSoup(self.text, features='lxml')
        file_links = soup.find_all('a', {'class': 'file-download'})

        new_file_links = []
        old_file_links = []
        new_last_time = None

        if not self.last_time:
            # If we don't have anything to copy from, then it is all new.
            new_file_links = file_links
            try:
                new_last_time = file_links[0].find_all('time')[0]['datetime']
            except (IndexError, KeyError):
                # No files or no datetime attribute - leave new_last_time
                # unset rather than dropping into a debugger.
                logging.warning(
                    "Unable to find a timestamp for thing {}".format(self.thing_id))

            for file_link in file_links:
                timestamp = file_link.find_all('time')[0]['datetime']
                logging.debug("Found file {} from {}".format(
                    file_link["title"], timestamp))
                if not new_last_time or timestamp > new_last_time:
                    new_last_time = timestamp
        else:
            for file_link in file_links:
                timestamp = file_link.find_all('time')[0]['datetime']
                logging.debug("Checking {} (updated {})".format(
                    file_link["title"], timestamp))
                if timestamp > self.last_time:
                    new_file_links.append(file_link)
                else:
                    old_file_links.append(file_link)
                if not new_last_time or timestamp > new_last_time:
                    new_last_time = timestamp

        logging.debug("new timestamp {}".format(new_last_time))

        # OK. Time to get to work.
        logging.debug("Generating download_dir")
        os.mkdir(self.download_dir)
        # First grab the cached files (if any)
        logging.info("Copying {} unchanged files.".format(len(old_file_links)))
        for file_link in old_file_links:
            old_file = os.path.join(prev_dir, file_link["title"])
            new_file = os.path.join(self.download_dir, file_link["title"])
            try:
                logging.debug("Copying {} to {}".format(old_file, new_file))
                copyfile(old_file, new_file)
            except FileNotFoundError:
                logging.warning(
                    "Unable to find {} in old archive, redownloading".format(
                        file_link["title"]))
                new_file_links.append(file_link)

        # Now download the new ones
        files = [("{}{}".format(URL_BASE, x['href']), x["title"])
                 for x in new_file_links]
        logging.info("Downloading {} new files of {}".format(
            len(new_file_links), len(file_links)))
        try:
            for url, name in files:
                file_name = os.path.join(self.download_dir, name)
                logging.debug("Downloading {} from {} to {}".format(
                    name, url, file_name))
                data_req = requests.get(url)
                with open(file_name, 'wb') as handle:
                    handle.write(data_req.content)
        except Exception as exception:
            logging.error("Failed to download {} - {}".format(name, exception))
            os.rename(self.download_dir, "{}_failed".format(self.download_dir))
            return State.FAILED

        # People like images
        image_dir = os.path.join(self.download_dir, 'images')
        imagelinks = soup.find_all('span', {'class': 'gallery-slider'})[0] \
            .find_all('div', {'class': 'gallery-photo'})
        logging.info("Downloading {} images.".format(len(imagelinks)))
        try:
            os.mkdir(image_dir)
            for imagelink in imagelinks:
                # Use .get() so a missing attribute yields None rather than
                # raising KeyError; take the largest available image.
                url = next(filter(None, [imagelink.get(x) for x in ['data-full',
                                                                    'data-large',
                                                                    'data-medium',
                                                                    'data-thumb']]), None)
                if not url:
                    logging.warning(
                        "Unable to find any urls for {}".format(imagelink))
                    continue

                filename = os.path.basename(url)
                if filename.endswith('stl'):
                    filename = "{}.png".format(filename)
                image_req = requests.get(url)
                with open(os.path.join(image_dir, filename), 'wb') as handle:
                    handle.write(image_req.content)
        except Exception as exception:
            logging.error("Failed to download {} - {}".format(filename, exception))
            os.rename(self.download_dir, "{}_failed".format(self.download_dir))
            return State.FAILED

        # Instructions are good too.
        logging.info("Downloading readme")
        try:
            readme_txt = soup.find('meta', property='og:description')['content']
            with open(os.path.join(self.download_dir, 'readme.txt'), 'w') as readme_handle:
                readme_handle.write("{}\n".format(readme_txt))
        except (TypeError, KeyError) as exception:
            logging.warning("No readme? {}".format(exception))
        except IOError as exception:
            logging.warning("Failed to write readme! {}".format(exception))

        # Best get some licenses
        logging.info("Downloading license")
        try:
            license_txt = soup.find('div', {'class': 'license-text'}).text
            if license_txt:
                with open(os.path.join(self.download_dir, 'license.txt'), 'w') as license_handle:
                    license_handle.write("{}\n".format(license_txt))
        except AttributeError as exception:
            logging.warning("No license? {}".format(exception))
        except IOError as exception:
            logging.warning("Failed to write license! {}".format(exception))

        try:
            # Now write the timestamp
            with open(timestamp_file, 'w') as timestamp_handle:
                timestamp_handle.write(new_last_time)
        except Exception as exception:
            logging.error("Failed to write timestamp file - {}".format(exception))
            os.rename(self.download_dir, "{}_failed".format(self.download_dir))
            return State.FAILED
        self._needs_download = False
        logging.debug("Download of {} finished".format(self.title))
        return State.OK


def do_batch(batch_file, download_dir, quick):
    """ Read a file line by line, parsing each line as a set of calls to this script. """
    with open(batch_file) as handle:
        for line in handle:
            line = line.strip()
            if not line:
                # Skip empty lines
                continue
            logging.info("Handling instruction {}".format(line))
            command_arr = line.split()
            if command_arr[0] == "thing":
                logging.debug(
                    "Handling batch thing instruction: {}".format(line))
                Thing(command_arr[1]).download(download_dir)
                continue
            if command_arr[0] == "collection":
                logging.debug(
                    "Handling batch collection instruction: {}".format(line))
                Collection(command_arr[1], command_arr[2],
                           download_dir, quick).download()
                continue
            if command_arr[0] == "user":
                logging.debug(
                    "Handling batch user instruction: {}".format(line))
                Designs(command_arr[1], download_dir, quick).download()
                continue
            logging.warning("Unable to parse current instruction. Skipping.")


def main():
    """ Entry point for script being run as a command. """
    parser = argparse.ArgumentParser()
    parser.add_argument("-l", "--log-level", choices=[
        'debug', 'info', 'warning'], default='info', help="level of logging desired")
    parser.add_argument("-d", "--directory",
                        help="Target directory to download into")
    parser.add_argument("-f", "--log-file",
                        help="Place to log debug information to")
    parser.add_argument("-q", "--quick", action="store_true",
                        help="Assume date ordering on posts")

    subparsers = parser.add_subparsers(
        help="Type of thing to download", dest="subcommand")
    collection_parser = subparsers.add_parser(
        'collection', help="Download one or more entire collection(s)")
    collection_parser.add_argument(
        "owner", help="The owner of the collection(s) to get")
    collection_parser.add_argument(
        "collections", nargs="+", help="Space separated list of the name(s) of the collections to get")
    thing_parser = subparsers.add_parser(
        'thing', help="Download one or more things.")
    thing_parser.add_argument(
        "things", nargs="*", help="Space separated list of thing ID(s) to download")
    user_parser = subparsers.add_parser(
        "user", help="Download all things by one or more users")
    user_parser.add_argument(
        "users", nargs="+", help="A space separated list of the user(s) to get the designs of")
    batch_parser = subparsers.add_parser(
        "batch", help="Perform multiple actions written in a text file")
    batch_parser.add_argument(
        "batch_file", help="The name of the file to read.")
    subparsers.add_parser("version", help="Show the current version")

    args = parser.parse_args()
    if not args.subcommand:
        parser.print_help()
        sys.exit(1)
    if not args.directory:
        args.directory = os.getcwd()

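    # The root logger is left at DEBUG; the console handler filters to the
    # requested level, while the optional file handler keeps full debug output.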
    logger = logging.getLogger()
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logger.setLevel(logging.DEBUG)
    console_handler = logging.StreamHandler()
    console_handler.setLevel(args.log_level.upper())

    logger.addHandler(console_handler)
    if args.log_file:
        file_handler = logging.FileHandler(args.log_file)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)


    # Start downloaders
    thing_queue = multiprocessing.JoinableQueue()
    logging.debug("starting {} downloader(s)".format(DOWNLOADER_COUNT))
    downloaders = [Downloader(thing_queue, args.directory)
                   for _ in range(DOWNLOADER_COUNT)]
    for downloader in downloaders:
        downloader.start()

    if args.subcommand.startswith("collection"):
        for collection in args.collections:
            Collection(args.owner, collection,
                       args.directory, args.quick).download()
    if args.subcommand == "thing":
        for thing in args.things:
            thing_queue.put(thing)
    if args.subcommand == "user":
        for user in args.users:
            Designs(user, args.directory, args.quick).download()
    if args.subcommand == "version":
        print("thingy_grabber.py version {}".format(VERSION))
    if args.subcommand == "batch":
        do_batch(args.batch_file, args.directory, args.quick)

    # Stop the downloader processes
    for downloader in downloaders:
        thing_queue.put(None)


if __name__ == "__main__":
    multiprocessing.freeze_support()
    main()