Fix for scanning directories

nightly
Andrew 4 years ago
parent e0ea7aa619
commit 7438c3ffe6
  1. 14
      stashr/utils.py

@@ -1051,6 +1051,15 @@ def new_create_scrape_entries():
def new_create_empty_scrape_entry(item):
check_scrape_item = database.session \
.query(database.ScrapeItems) \
.filter(database.ScrapeItems.scrape_directory_id == item.directory_id) \
.first()
if check_scrape_item is not None:
logger.warning('Scrape Item in database')
return
new_scrape_item = database.ScrapeItems(
scrape_directory = item.directory_path,
scrape_directory_id = item.directory_id
@@ -1069,6 +1078,11 @@ def new_get_scrape_candidates(item):
.query(database.ScrapeItems) \
.filter(database.ScrapeItems.scrape_directory == item.directory_path) \
.first()
if scrape_item.scrape_candidate is not None:
logger.warning('Item Already Scraped')
return
candidates = cv.search(item.directory_path, limit=10, resources=['volume'])
scrape_candidate = candidates.results[0]['id']
match_found = False

Loading…
Cancel
Save