about summary refs log tree commit diff
path: root/src/scraping/buxton/scraper.py
diff options
context:
space:
mode:
Diffstat (limited to 'src/scraping/buxton/scraper.py')
-rw-r--r--  src/scraping/buxton/scraper.py  67
1 files changed, 42 insertions, 25 deletions
diff --git a/src/scraping/buxton/scraper.py b/src/scraping/buxton/scraper.py
index a9256073b..2d1a5ca32 100644
--- a/src/scraping/buxton/scraper.py
+++ b/src/scraping/buxton/scraper.py
@@ -13,11 +13,12 @@ import math
import sys
source = "./source"
-dist = "../../server/public/files"
+filesPath = "../../server/public/files"
+image_dist = filesPath + "/images/buxton"
db = MongoClient("localhost", 27017)["Dash"]
target_collection = db.newDocuments
-target_doc_title = "Workspace 1"
+target_doc_title = "Collection 1"
schema_guids = []
common_proto_id = ""
@@ -70,7 +71,7 @@ def text_doc_map(string_list):
return listify(proxify_guids(list(map(guid_map, string_list))))
-def write_collection(parse_results, display_fields, storage_key, viewType=2):
+def write_collection(parse_results, display_fields, storage_key, viewType):
view_guids = parse_results["child_guids"]
data_doc = parse_results["schema"]
@@ -99,13 +100,18 @@ def write_collection(parse_results, display_fields, storage_key, viewType=2):
fields[storage_key] = listify(proxify_guids(view_guids))
fields["schemaColumns"] = listify(display_fields)
fields["backgroundColor"] = "white"
- fields["scale"] = 0.5
fields["viewType"] = 2
fields["author"] = "Bill Buxton"
+ fields["disableLOD"] = True
fields["creationDate"] = {
"date": datetime.datetime.utcnow().microsecond,
"__type": "date"
}
+ if "image_urls" in parse_results:
+ fields["hero"] = {
+ "url": parse_results["image_urls"][0],
+ "__type": "image"
+ }
fields["isPrototype"] = True
fields["page"] = -1
@@ -167,14 +173,17 @@ def write_text_doc(content):
def write_image(folder, name):
- path = f"http://localhost:1050/files/{folder}/{name}"
+ path = f"http://localhost:1050/files/images/buxton/{folder}/{name}"
data_doc_guid = guid()
view_doc_guid = guid()
- image = Image.open(f"{dist}/{folder}/{name}")
+ image = Image.open(f"{image_dist}/{folder}/{name}")
native_width, native_height = image.size
+ if abs(native_width - native_height) < 10:
+ return None
+
view_doc = {
"_id": view_doc_guid,
"fields": {
@@ -213,7 +222,10 @@ def write_image(folder, name):
target_collection.insert_one(view_doc)
target_collection.insert_one(data_doc)
- return view_doc_guid
+ return {
+ "layout_id": view_doc_guid,
+ "url": path
+ }
def parse_document(file_name: str):
@@ -222,20 +234,26 @@ def parse_document(file_name: str):
result = {}
- dir_path = dist + "/" + pure_name
+ dir_path = image_dist + "/" + pure_name
+ print(dir_path)
mkdir_if_absent(dir_path)
raw = str(docx2txt.process(source + "/" + file_name, dir_path))
+ urls = []
view_guids = []
count = 0
for image in os.listdir(dir_path):
- count += 1
- view_guids.append(write_image(pure_name, image))
- copyfile(dir_path + "/" + image, dir_path +
- "/" + image.replace(".", "_o.", 1))
- copyfile(dir_path + "/" + image, dir_path +
- "/" + image.replace(".", "_m.", 1))
+ created = write_image(pure_name, image)
+ if created != None:
+ urls.append(created["url"])
+ view_guids.append(created["layout_id"])
+ count += 1
+ resolved = dir_path + "/" + image
+ original = dir_path + "/" + image.replace(".", "_o.", 1)
+ medium = dir_path + "/" + image.replace(".", "_m.", 1)
+ copyfile(resolved, original)
+ copyfile(resolved, medium)
print(f"extracted {count} images...")
def sanitize(line): return re.sub("[\n\t]+", "", line).replace(u"\u00A0", " ").replace(
@@ -342,7 +360,8 @@ def parse_document(file_name: str):
"fields": result,
"__type": "Doc"
},
- "child_guids": view_guids
+ "child_guids": view_guids,
+ "image_urls": urls
}
@@ -356,21 +375,19 @@ def write_common_proto():
"_id": id,
"fields": {
"proto": protofy("collectionProto"),
- "title": "Common Import Proto",
+ "title": "The Buxton Collection",
},
"__type": "Doc"
}
-
target_collection.insert_one(common_proto)
-
return id
-if os.path.exists(dist):
- shutil.rmtree(dist)
-while os.path.exists(dist):
+if os.path.exists(image_dist):
+ shutil.rmtree(image_dist)
+while os.path.exists(image_dist):
pass
-os.mkdir(dist)
+os.mkdir(image_dist)
mkdir_if_absent(source)
common_proto_id = write_common_proto()
@@ -380,7 +397,7 @@ for file_name in os.listdir(source):
if file_name.endswith('.docx'):
candidates += 1
schema_guids.append(write_collection(
- parse_document(file_name), ["title", "data"], "image_data"))
+ parse_document(file_name), ["title", "data"], "data", 5))
print("writing parent schema...")
parent_guid = write_collection({
@@ -390,7 +407,7 @@ parent_guid = write_collection({
"__type": "Doc"
},
"child_guids": schema_guids
-}, ["title", "short_description", "original_price"], "data", 1)
+}, ["title", "short_description", "original_price"], "data", 4)
print("appending parent schema to main workspace...\n")
target_collection.update_one(
@@ -400,7 +417,7 @@ target_collection.update_one(
print("rewriting .gitignore...\n")
lines = ['*', '!.gitignore']
-with open(dist + "/.gitignore", 'w') as f:
+with open(filesPath + "/.gitignore", 'w') as f:
f.write('\n'.join(lines))
suffix = "" if candidates == 1 else "s"