remove : from timestamps on updated dirs
[clinton/thingy_grabber.git] / thingy_grabber.py
CommitLineData
975060c9
OM
1#!/usr/bin/env python3
2"""
3Thingiverse bulk downloader
4"""
5
6import re
4a98996b 7import sys
975060c9
OM
8import os
9import argparse
10import unicodedata
11import requests
fa2f3251 12import logging
6a777954 13import multiprocessing
7b84ba6d 14import enum
3c82f75b 15from shutil import copyfile
975060c9
OM
16from bs4 import BeautifulSoup
17
# Site endpoints. The two collection URLs are ajax endpoints that return
# paginated JSON-ish blobs of things.
URL_BASE = "https://www.thingiverse.com"
URL_COLLECTION = URL_BASE + "/ajax/thingcollection/list_collected_things"
USER_COLLECTION = URL_BASE + "/ajax/user/designs"

# Regexes to pull pagination metadata out of the raw ajax response text.
ID_REGEX = re.compile(r'"id":(\d*),')
TOTAL_REGEX = re.compile(r'"total":(\d*),')
LAST_PAGE_REGEX = re.compile(r'"last_page":(\d*),')
# This appears to be fixed at 12, but if it changes would screw the rest up.
PER_PAGE_REGEX = re.compile(r'"per_page":(\d*),')

# Collapses runs of whitespace/hyphens - used by strip_ws() and slugify().
NO_WHITESPACE_REGEX = re.compile(r'[-\s]+')

# Number of parallel Downloader worker processes started by main().
DOWNLOADER_COUNT = 1
# NOTE(review): RETRY_COUNT is not referenced anywhere in this file -
# presumably intended for download retries; confirm before removing.
RETRY_COUNT = 3

VERSION = "0.7.0"
7b84ba6d
OM
# Outcome of a single download attempt. Built with the Enum functional API;
# members get auto-numbered values (1, 2, 3), matching enum.auto().
State = enum.Enum('State', 'OK FAILED ALREADY_DOWNLOADED')
38
39
dd8c35f4
OM
def strip_ws(value):
    """ Collapse each run of whitespace (or hyphens) in value into one '-'. """
    collapsed = NO_WHITESPACE_REGEX.sub('-', value)
    return str(collapsed)
975060c9 43
dbdb1782 44
975060c9
OM
def slugify(value):
    """
    Normalise a string for safe use as a file/directory name.

    Decomposes accented characters and drops anything non-ASCII, removes
    every character that is not alphanumeric, underscore, whitespace or a
    hyphen, then collapses whitespace/hyphen runs into single hyphens.

    Note: despite what the old docstring claimed, this does NOT lowercase
    the input - existing download directories depend on case being kept.
    """
    # NFKD decomposition lets the ascii codec strip accents rather than
    # whole characters (e.g. "é" -> "e").
    value = unicodedata.normalize('NFKD', value).encode(
        'ascii', 'ignore').decode()
    value = re.sub(r'[^\w\s-]', '', value).strip()
    # Same pattern as the module-level NO_WHITESPACE_REGEX.
    return re.sub(r'[-\s]+', '-', value)
55
6a777954
OM
class Downloader(multiprocessing.Process):
    """
    Worker process that consumes thing ids from a queue and downloads them.
    A None on the queue is the shutdown sentinel.
    """

    def __init__(self, thing_queue, download_directory):
        multiprocessing.Process.__init__(self)
        # TODO: add parameters
        self.thing_queue = thing_queue
        self.download_directory = download_directory

    def run(self):
        """ Main worker loop: pull ids until the None sentinel arrives. """
        while True:
            thing_id = self.thing_queue.get()
            if thing_id is None:
                logging.info("Shutting download queue")
                self.thing_queue.task_done()
                return
            logging.info("Handling id {}".format(thing_id))
            Thing(thing_id).download(self.download_directory)
            self.thing_queue.task_done()
80
7b84ba6d 81
6a777954
OM
82
83
dbdb1782 84
class Grouping:
    """ Holds details of a group of things for download.

    This is effectively (although not actually) an abstract class
    - use Collection or Designs instead; they set url, download_dir
    and collection_url.
    """

    def __init__(self, quick):
        self.things = []        # thing ids discovered in this grouping
        self.total = 0
        self.req_id = None
        self.last_page = 0
        self.per_page = None
        # Should we stop downloading when we hit a known datestamp?
        self.quick = quick
        # These should be set by child classes.
        self.url = None
        self.download_dir = None
        self.collection_url = None

    def _get_small_grouping(self, req):
        """ Handle small groupings (single page - no ajax pagination).

        Parses the thing links straight out of the supplied response and
        caches the ids on self.things.
        """
        soup = BeautifulSoup(req.text, features='lxml')
        links = soup.find_all('a', {'class': 'card-img-holder'})
        # hrefs look like "/thing:12345" - keep only the id after the colon.
        self.things = [x['href'].split(':')[1] for x in links]
        self.total = len(self.things)

        return self.things

    def get(self):
        """ Retrieve (and cache) the list of thing ids in the grouping.

        Raises ValueError if a subclass failed to set self.url.
        """
        if self.things:
            # We've already done it.
            return self.things

        # Check for initialisation:
        if not self.url:
            logging.error("No URL set - object not initialised properly?")
            raise ValueError("No URL set - object not initialised properly?")

        # Get the internal details of the grouping.
        logging.debug("Querying {}".format(self.url))
        c_req = requests.get(self.url)
        total = TOTAL_REGEX.search(c_req.text)
        if total is None:
            # This is a small (<13) items grouping. Pull the list from this req.
            return self._get_small_grouping(c_req)
        # NOTE: total and per_page stay as strings - they are only logged.
        self.total = total.groups()[0]
        self.req_id = ID_REGEX.search(c_req.text).groups()[0]
        self.last_page = int(LAST_PAGE_REGEX.search(c_req.text).groups()[0])
        self.per_page = PER_PAGE_REGEX.search(c_req.text).groups()[0]
        parameters = {
            'base_url': self.url,
            'page': '1',
            'per_page': '12',
            'id': self.req_id
        }
        # Walk every ajax page, accumulating thing ids as we go.
        for current_page in range(1, self.last_page + 1):
            parameters['page'] = current_page
            req = requests.post(self.collection_url, parameters)
            soup = BeautifulSoup(req.text, features='lxml')
            links = soup.find_all('a', {'class': 'card-img-holder'})
            self.things += [x['href'].split(':')[1] for x in links]

        return self.things

    def download(self):
        """ Download every thing in the grouping into self.download_dir.

        Raises ValueError if a subclass failed to set download_dir.
        In quick mode, stops at the first already-downloaded thing.
        """
        if not self.things:
            self.get()

        if not self.download_dir:
            raise ValueError(
                "No download_dir set - invalidly initialised object?")

        # (fix) removed an unused `base_dir = os.getcwd()` local here.
        try:
            os.mkdir(self.download_dir)
        except FileExistsError:
            logging.info("Target directory {} already exists. Assuming a resume."
                         .format(self.download_dir))
        logging.info("Downloading {} thing(s).".format(self.total))
        for idx, thing in enumerate(self.things):
            logging.info("Downloading thing {}".format(idx))
            result = Thing(thing).download(self.download_dir)
            if self.quick and result == State.ALREADY_DOWNLOADED:
                # Quick mode assumes date ordering: once we hit something we
                # already have, everything after it is up to date too.
                logging.info("Caught up, stopping.")
                return
975060c9 172
dbdb1782 173
3522a3bf
OM
class Collection(Grouping):
    """ Holds details of a single named collection belonging to a user. """

    def __init__(self, user, name, directory, quick):
        super().__init__(quick)
        self.user = user
        self.name = name
        # Collection pages live at /<user>/collections/<name>.
        self.url = "{}/{}/collections/{}".format(
            URL_BASE, self.user, strip_ws(self.name))
        target_name = "{}-{}".format(slugify(self.user), slugify(self.name))
        self.download_dir = os.path.join(directory, target_name)
        self.collection_url = URL_COLLECTION
3522a3bf 186
dbdb1782 187
3522a3bf
OM
class Designs(Grouping):
    """ Holds details of all of a user's own designs. """

    def __init__(self, user, directory, quick):
        super().__init__(quick)
        self.user = user
        self.url = "{}/{}/designs".format(URL_BASE, self.user)
        target_name = "{} designs".format(slugify(self.user))
        self.download_dir = os.path.join(directory, target_name)
        self.collection_url = USER_COLLECTION
975060c9 198
dbdb1782 199
3c82f75b
OM
class Thing:
    """ An individual design on thingiverse.

    Lifecycle: construct with a thing id, then call download(base_dir).
    _parse() scrapes the /files page to decide whether anything needs
    doing; download() then mirrors files, images, readme and license into
    a directory named after the thing's slugified title.
    """

    def __init__(self, thing_id):
        self.thing_id = thing_id
        self.last_time = None       # timestamp of the previous download, if any
        self._parsed = False        # set only by a successful _parse()
        self._needs_download = True
        self.text = None            # raw HTML of the /files page
        self.title = None
        self.download_dir = None

    def _parse(self, base_dir):
        """ Work out what, if anything needs to be done.

        Leaves self._parsed False on network/HTTP failure so download()
        can tell that parsing did not complete.
        """
        if self._parsed:
            return

        url = "{}/thing:{}/files".format(URL_BASE, self.thing_id)
        try:
            req = requests.get(url)
        except requests.exceptions.ConnectionError as error:
            logging.error("Unable to connect for thing {}: {}".format(
                self.thing_id, error))
            return

        self.text = req.text
        soup = BeautifulSoup(self.text, features='lxml')
        try:
            self.title = slugify(soup.find_all('h1')[0].text.strip())
        except IndexError:
            logging.warning(
                "No title found for thing {}".format(self.thing_id))
            # Fall back to the numeric id as the directory name.
            self.title = self.thing_id

        if req.status_code == 404:
            logging.warning(
                "404 for thing {} - DMCA or invalid number?".format(self.thing_id))
            return

        if req.status_code > 299:
            logging.warning(
                "bad status code {} for thing {} - try again later?".format(req.status_code, self.thing_id))
            return

        self.download_dir = os.path.join(base_dir, self.title)

        logging.debug("Parsing {} ({})".format(self.thing_id, self.title))

        if not os.path.exists(self.download_dir):
            # Not yet downloaded
            self._parsed = True
            return

        timestamp_file = os.path.join(self.download_dir, 'timestamp.txt')
        if not os.path.exists(timestamp_file):
            # Old download from before timestamps were recorded.
            logging.warning(
                "Old-style download directory found. Assuming update required.")
            self._parsed = True
            return

        try:
            with open(timestamp_file, 'r') as timestamp_handle:
                self.last_time = timestamp_handle.readlines()[0]
            logging.info("last downloaded version: {}".format(self.last_time))
        except FileNotFoundError:
            # Not run on this thing before.
            logging.info(
                "Old-style download directory found. Assuming update required.")
            self.last_time = None
            self._parsed = True
            return

        # OK, so we have a timestamp, lets see if there is anything new to get.
        # NOTE(review): relies on plain string comparison of the datetime
        # attributes - assumes they are ISO-ish and lexically ordered.
        file_links = soup.find_all('a', {'class': 'file-download'})
        for file_link in file_links:
            timestamp = file_link.find_all('time')[0]['datetime']
            logging.debug("Checking {} (updated {})".format(
                file_link["title"], timestamp))
            if timestamp > self.last_time:
                logging.info(
                    "Found new/updated file {}".format(file_link["title"]))
                self._needs_download = True
                self._parsed = True
                return
        # Got here, so nope, no new files.
        self._needs_download = False
        self._parsed = True

    def download(self, base_dir):
        """ Download all files for a given thing.

        Returns a State: OK when the thing is now downloaded, FAILED on
        any error, ALREADY_DOWNLOADED when nothing new was available.
        (The old docstring wrongly claimed a boolean return.)
        """
        if not self._parsed:
            self._parse(base_dir)

        if not self._parsed:
            logging.error(
                "Unable to parse {} - aborting download".format(self.thing_id))
            return State.FAILED

        if not self._needs_download:
            print("{} - {} already downloaded - skipping.".format(self.thing_id, self.title))
            return State.ALREADY_DOWNLOADED

        # Have we already downloaded some things?
        timestamp_file = os.path.join(self.download_dir, 'timestamp.txt')
        prev_dir = None
        if os.path.exists(self.download_dir):
            if not os.path.exists(timestamp_file):
                # edge case: old style dir w/out timestamp. Move it aside to a
                # unique "<dir>_old[_N]" name before redownloading everything.
                logging.warning(
                    "Old style download dir found for {}".format(self.title))
                prev_count = 0
                target_dir = "{}_old".format(self.download_dir)
                while os.path.exists(target_dir):
                    prev_count = prev_count + 1
                    target_dir = "{}_old_{}".format(self.download_dir, prev_count)
                os.rename(self.download_dir, target_dir)
            else:
                # Keep the previous version around so unchanged files can be
                # copied from it instead of redownloaded.
                prev_dir = "{}_{}".format(self.download_dir, slugify(self.last_time))
                os.rename(self.download_dir, prev_dir)

        # Get the list of files to download
        soup = BeautifulSoup(self.text, features='lxml')
        file_links = soup.find_all('a', {'class': 'file-download'})

        new_file_links = []
        old_file_links = []
        new_last_time = None

        if not self.last_time:
            # If we don't have anything to copy from, then it is all new.
            new_file_links = file_links
            try:
                new_last_time = file_links[0].find_all('time')[0]['datetime']
            except (IndexError, KeyError) as exception:
                # (fix) this used to be a bare `except:` that dropped into an
                # interactive code.interact() console - fail cleanly instead.
                logging.error("Unable to find a timestamp for thing {}: {}".format(
                    self.thing_id, exception))
                return State.FAILED

            for file_link in file_links:
                timestamp = file_link.find_all('time')[0]['datetime']
                logging.debug("Found file {} from {}".format(
                    file_link["title"], timestamp))
                if timestamp > new_last_time:
                    new_last_time = timestamp
        else:
            # Split files into changed (redownload) vs unchanged (copy over),
            # tracking the newest timestamp seen as we go.
            for file_link in file_links:
                timestamp = file_link.find_all('time')[0]['datetime']
                logging.debug("Checking {} (updated {})".format(
                    file_link["title"], timestamp))
                if timestamp > self.last_time:
                    new_file_links.append(file_link)
                else:
                    old_file_links.append(file_link)
                if not new_last_time or timestamp > new_last_time:
                    new_last_time = timestamp

        logging.debug("new timestamp {}".format(new_last_time))

        # OK. Time to get to work.
        logging.debug("Generating download_dir")
        os.mkdir(self.download_dir)
        # First grab the cached files (if any)
        logging.info("Copying {} unchanged files.".format(len(old_file_links)))
        for file_link in old_file_links:
            old_file = os.path.join(prev_dir, file_link["title"])
            new_file = os.path.join(self.download_dir, file_link["title"])
            try:
                logging.debug("Copying {} to {}".format(old_file, new_file))
                copyfile(old_file, new_file)
            except FileNotFoundError:
                logging.warning(
                    "Unable to find {} in old archive, redownloading".format(file_link["title"]))
                new_file_links.append(file_link)

        # Now download the new ones
        files = [("{}{}".format(URL_BASE, x['href']), x["title"])
                 for x in new_file_links]
        logging.info("Downloading {} new files of {}".format(
            len(new_file_links), len(file_links)))
        try:
            for url, name in files:
                file_name = os.path.join(self.download_dir, name)
                logging.debug("Downloading {} from {} to {}".format(
                    name, url, file_name))
                data_req = requests.get(url)
                with open(file_name, 'wb') as handle:
                    handle.write(data_req.content)
        except Exception as exception:
            logging.error("Failed to download {} - {}".format(name, exception))
            # A partial download is marked "_failed" so a rerun starts fresh.
            os.rename(self.download_dir, "{}_failed".format(self.download_dir))
            return State.FAILED

        # People like images
        image_dir = os.path.join(self.download_dir, 'images')
        imagelinks = soup.find_all('span', {'class': 'gallery-slider'})[0] \
            .find_all('div', {'class': 'gallery-photo'})
        logging.info("Downloading {} images.".format(len(imagelinks)))
        try:
            os.mkdir(image_dir)
            for imagelink in imagelinks:
                # Take the best resolution available for each image.
                url = next(filter(None, [imagelink[x] for x in ['data-full',
                                                                'data-large',
                                                                'data-medium',
                                                                'data-thumb']]), None)
                if not url:
                    logging.warning(
                        "Unable to find any urls for {}".format(imagelink))
                    continue

                filename = os.path.basename(url)
                if filename.endswith('stl'):
                    # Renders of stl files are actually pngs.
                    filename = "{}.png".format(filename)
                image_req = requests.get(url)
                with open(os.path.join(image_dir, filename), 'wb') as handle:
                    handle.write(image_req.content)
        except Exception as exception:
            # (fix) was a bare print() - use logging like the other failures.
            logging.error("Failed to download {} - {}".format(filename, exception))
            os.rename(self.download_dir, "{}_failed".format(self.download_dir))
            return State.FAILED

        # instructions are good too.
        logging.info("Downloading readme")
        try:
            readme_txt = soup.find('meta', property='og:description')[
                'content']
            with open(os.path.join(self.download_dir, 'readme.txt'), 'w') as readme_handle:
                readme_handle.write("{}\n".format(readme_txt))
        except (TypeError, KeyError) as exception:
            logging.warning("No readme? {}".format(exception))
        except IOError as exception:
            logging.warning("Failed to write readme! {}".format(exception))

        # Best get some licenses
        logging.info("Downloading license")
        try:
            license_txt = soup.find('div', {'class': 'license-text'}).text
            if license_txt:
                with open(os.path.join(self.download_dir, 'license.txt'), 'w') as license_handle:
                    license_handle.write("{}\n".format(license_txt))
        except AttributeError as exception:
            logging.warning("No license? {}".format(exception))
        except IOError as exception:
            logging.warning("Failed to write license! {}".format(exception))

        try:
            # Now write the timestamp so future runs can resume incrementally.
            with open(timestamp_file, 'w') as timestamp_handle:
                timestamp_handle.write(new_last_time)
        except Exception as exception:
            # (fix) was a bare print() - use logging like the other failures.
            logging.error("Failed to write timestamp file - {}".format(exception))
            os.rename(self.download_dir, "{}_failed".format(self.download_dir))
            return State.FAILED
        self._needs_download = False
        logging.debug("Download of {} finished".format(self.title))
        return State.OK
975060c9 459
dbdb1782 460
def do_batch(batch_file, download_dir, quick):
    """ Read a file in line by line, parsing each as a set of calls to this script.

    Supported instructions (one per line):
        thing <id>
        collection <owner> <name>
        user <owner>
    Blank lines are skipped; unrecognised lines are logged and skipped.
    """
    with open(batch_file) as handle:
        for line in handle:
            line = line.strip()
            if not line:
                # (fix) a blank line used to crash with IndexError below.
                continue
            logging.info("Handling instruction {}".format(line))
            command_arr = line.split()
            if command_arr[0] == "thing":
                logging.debug(
                    "Handling batch thing instruction: {}".format(line))
                Thing(command_arr[1]).download(download_dir)
                continue
            if command_arr[0] == "collection":
                logging.debug(
                    "Handling batch collection instruction: {}".format(line))
                Collection(command_arr[1], command_arr[2],
                           download_dir, quick).download()
                continue
            if command_arr[0] == "user":
                # (fix) this branch used to log "collection" - copy-paste bug.
                logging.debug(
                    "Handling batch user instruction: {}".format(line))
                Designs(command_arr[1], download_dir, quick).download()
                continue
            logging.warning("Unable to parse current instruction. Skipping.")
485
dbdb1782 486
975060c9
OM
def main():
    """ Entry point for script being run as a command. """
    parser = argparse.ArgumentParser()
    parser.add_argument("-l", "--log-level", choices=[
        'debug', 'info', 'warning'], default='info', help="level of logging desired")
    parser.add_argument("-d", "--directory",
                        help="Target directory to download into")
    parser.add_argument("-f", "--log-file",
                        help="Place to log debug information to")
    parser.add_argument("-q", "--quick", action="store_true",
                        help="Assume date ordering on posts")

    # One subcommand per mode of operation; `dest` lets us detect "none given".
    subparsers = parser.add_subparsers(
        help="Type of thing to download", dest="subcommand")
    collection_parser = subparsers.add_parser(
        'collection', help="Download one or more entire collection(s)")
    collection_parser.add_argument(
        "owner", help="The owner of the collection(s) to get")
    collection_parser.add_argument(
        "collections", nargs="+", help="Space seperated list of the name(s) of collection to get")
    thing_parser = subparsers.add_parser(
        'thing', help="Download a single thing.")
    thing_parser.add_argument(
        "things", nargs="*", help="Space seperated list of thing ID(s) to download")
    user_parser = subparsers.add_parser(
        "user", help="Download all things by one or more users")
    user_parser.add_argument(
        "users", nargs="+", help="A space seperated list of the user(s) to get the designs of")
    batch_parser = subparsers.add_parser(
        "batch", help="Perform multiple actions written in a text file")
    batch_parser.add_argument(
        "batch_file", help="The name of the file to read.")
    subparsers.add_parser("version", help="Show the current version")

    args = parser.parse_args()
    if not args.subcommand:
        # No subcommand given: show usage and exit with an error status.
        parser.print_help()
        sys.exit(1)
    if not args.directory:
        # Default download target is the current working directory.
        args.directory = os.getcwd()

    # Root logger runs at DEBUG; each handler filters to its own level, so
    # the console honours --log-level while the log file gets everything.
    logger = logging.getLogger()
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logger.setLevel(logging.DEBUG)
    console_handler = logging.StreamHandler()
    console_handler.setLevel(args.log_level.upper())

    logger.addHandler(console_handler)
    if args.log_file:
        file_handler = logging.FileHandler(args.log_file)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

    # Start downloader worker processes. NOTE(review): only the "thing"
    # subcommand feeds this queue; collection/user/batch download inline.
    thing_queue = multiprocessing.JoinableQueue()
    logging.debug("starting {} downloader(s)".format(DOWNLOADER_COUNT))
    downloaders = [Downloader(thing_queue, args.directory) for _ in range(DOWNLOADER_COUNT)]
    for downloader in downloaders:
        downloader.start()

    if args.subcommand.startswith("collection"):
        for collection in args.collections:
            Collection(args.owner, collection, args.directory, args.quick).download()
    if args.subcommand == "thing":
        for thing in args.things:
            thing_queue.put(thing)
    if args.subcommand == "user":
        for user in args.users:
            Designs(user, args.directory, args.quick).download()
    if args.subcommand == "version":
        print("thingy_grabber.py version {}".format(VERSION))
    if args.subcommand == "batch":
        do_batch(args.batch_file, args.directory, args.quick)

    # Stop the downloader processes: one None sentinel per worker. The
    # (non-daemon) workers keep the interpreter alive until they exit.
    for downloader in downloaders:
        thing_queue.put(None)
975060c9
OM
567
# Run the CLI only when executed as a script, not when imported.
if __name__ == "__main__":
    main()