3D algorithms benchmark script added #13

Open
wants to merge 17 commits into develop

formatting, minors
Rostislav Vasilikhin committed Apr 4, 2024
commit 93782e31fab1fea1b215d7e609f09e6312a4a909
download_collection.py: 117 changes (59 additions, 58 deletions)
@@ -13,6 +13,7 @@
if sys.version_info[0] < 3 or sys.version_info[1] < 5:
    raise Exception("Python 3.5 or greater is required. Try running `python3 download_collection.py`")

# global params
verbose = False

class ModelData:
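A side note on the version check in this hunk: because the major and minor fields are compared separately, a hypothetical Python 4.0 would also be rejected (its minor version is below 5). A tuple comparison expresses the same "at least 3.5" requirement in one step; this is only a sketch of that idiom, not part of the patch:

import sys

# Sketch only: a tuple comparison checks major and minor together.
if sys.version_info < (3, 5):
    raise Exception("Python 3.5 or greater is required. Try running `python3 download_collection.py`")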
@@ -123,6 +124,7 @@ def download_model(model_name, dir):
print("Material written")
return mesh_path, texture_path


def get_thumb(model : ModelData, dir):
if verbose:
print(model.name)
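For context on the `# global params` change above: `verbose` lives at module scope, so helpers such as get_thumb read it directly. A small standalone sketch of that pattern (simplified names, not code from the patch):

verbose = False   # module-level flag, mirroring the patch

def report(name):
    # Reading the module-level flag needs no extra declaration.
    if verbose:
        print(name)

report("model_a")   # prints nothing while verbose is False
verbose = True      # rebinding at module scope is visible to report()
report("model_a")   # prints "model_a"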
@@ -257,78 +259,77 @@ def get_stanford_model(url : str, name : str, ext: str, dir : str, chunk_size :


# ==================================================
if __name__ == "__main__":
    dirname = "dlmodels"

    all_models = []

    print("Getting Google Research models")

    content_file = Path(dirname) / Path("content.json")
    if content_file.exists():
        with open(content_file, "r") as openfile:
            models_json = json.load(openfile)
    else:
        Path(dirname).mkdir(parents=True, exist_ok=True)
        models_json = get_content(content_file)

    models = []
    for model in models_json:
        model_name = model['name']
        desc = model['description']
        fsize = model['filesize']
        thumb_url = model['thumbnail_url']
        if 'categories' in model:
            categories = model['categories']
        else:
            categories = [ ]
        models.append(ModelData(model_name, desc, fsize, thumb_url, categories))

    print("Getting thumbnail images")
    for model in models:
        get_thumb(model, dirname)

    print("Downloading models from the {}/{} collection.".format(owner_name, collection_name))

    for model in models:
        model_dir = Path(dirname) / Path(model.name)
        Path(model_dir).mkdir(parents=True, exist_ok=True)
        model_path, texture_path = download_model(model.name, model_dir)
        all_models.append((model_path, texture_path))

    print('Done.')

    categories = set()
    for model in models:
        for cat in model.categories:
            categories.add(cat)
    print("Categories:", categories)
    # 'Consumer Goods', 'Bag', 'Car Seat', 'Keyboard', 'Media Cases', 'Toys',
    # 'Action Figures', 'Bottles and Cans and Cups', 'Shoe', 'Legos', 'Hat',
    # 'Mouse', 'Headphones', 'Stuffed Toys', 'Board Games', 'Camera'

    print("\nGetting Stanford models")

    for m in stanford_models:
        url, chunk_size, internal_path = m

        s = url.split("/")[-1].split(".")
        name = "stanford_"+s[0]
        ext = s[1]+"."+s[2]

        if verbose:
            print(name + ":")
        model_dir = Path(dirname) / Path(name)
        Path(model_dir).mkdir(parents=True, exist_ok=True)
        model_path, texture_path = get_stanford_model(url, name, ext, model_dir, chunk_size, internal_path)
        all_models.append((model_path, texture_path))

    print("\nSubsampling")

    for mf, tf in all_models:
        print(mf, tf)
        verts, indices, normals, colors = cv.loadMesh(mf)
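For clarity on the Stanford filename handling above: the loop takes the last URL segment and splits it on dots, which assumes archive names of the form base.tar.gz. A minimal standalone sketch of just that parsing step (the URL below is a made-up placeholder, not an entry from stanford_models):

# Sketch only, with a hypothetical URL; real entries come from stanford_models.
url = "https://example.org/archives/bunny.tar.gz"

s = url.split("/")[-1].split(".")   # ["bunny", "tar", "gz"]
name = "stanford_" + s[0]           # "stanford_bunny"
ext = s[1] + "." + s[2]             # "tar.gz"
print(name, ext)                    # stanford_bunny tar.gz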