support resnet models, test failed models
@@ -20,7 +20,8 @@ from shutil import rmtree
# armnn only supports up to 4d transposes, but the model has a 5d transpose due to a redundant unsqueeze
# this function folds the unsqueeze+transpose+squeeze into a single 4d transpose
# it also switches from gather ops to slices since armnn has different dimension semantics for gathers
def onnx_transpose_4d(model_path: str):
# also fixes batch normalization being in training mode
def make_onnx_armnn_compatible(model_path: str):
    proto = onnx.load(model_path)
    graph = import_onnx(proto)
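As an aside, a minimal NumPy sketch (not part of the commit; the shapes and the permutation are made up) of the identity the folding relies on: inserting a length-1 axis, permuting in 5d while leaving that axis fixed, then squeezing it away is the same as a single 4d transpose.

import numpy as np

x = np.random.rand(2, 3, 4, 5)

# redundant unsqueeze -> 5d transpose -> squeeze
y = np.expand_dims(x, axis=0)          # shape (1, 2, 3, 4, 5)
y = np.transpose(y, (0, 1, 3, 2, 4))   # 5d permutation that leaves axis 0 alone
y = np.squeeze(y, axis=0)              # back to 4d, shape (2, 4, 3, 5)

# folded equivalent: one 4d transpose
z = np.transpose(x, (0, 2, 1, 3))      # shape (2, 4, 3, 5)

assert np.array_equal(y, z)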
@@ -141,6 +142,60 @@ def onnx_transpose_4d(model_path: str):
                                node2.inputs[idx] = squeeze_link
                            except ValueError:
                                pass
            elif node.op == "Reshape":
                for node1 in link1.outputs:
                    if node1.op == "Gather":
                        node2s = [n for l in node1.outputs for n in l.outputs]
                        if any(n.op == "Abs" for n in node2s):
                            axis = node1.attrs.get("axis", 0)
                            index = node1.inputs[1].values
                            slice_link = Variable(
                                f"onnx::Slice_123{gather_idx}",
                                dtype=node1.outputs[0].dtype,
                                shape=[1] + node1.outputs[0].shape,
                            )
                            slice_node = Node(
                                op="Slice",
                                inputs=[
                                    node1.inputs[0],
                                    Constant(
                                        f"SliceStart_123{gather_idx}",
                                        np.array([index]),
                                    ),
                                    Constant(
                                        f"SliceEnd_123{gather_idx}",
                                        np.array([index + 1]),
                                    ),
                                    Constant(
                                        f"SliceAxis_123{gather_idx}",
                                        np.array([axis]),
                                    ),
                                ],
                                outputs=[slice_link],
                                name=f"Slice_123{gather_idx}",
                            )
                            graph.nodes.append(slice_node)
                            gather_idx += 1

                            squeeze_link = Variable(
                                f"onnx::Squeeze_123{squeeze_idx}",
                                dtype=node1.outputs[0].dtype,
                                shape=node1.outputs[0].shape,
                            )
                            squeeze_node = Node(
                                op="Squeeze",
                                inputs=[slice_link, Constant(f"SqueezeAxis_123{squeeze_idx}", np.array([0]))],
                                outputs=[squeeze_link],
                                name=f"Squeeze_123{squeeze_idx}",
                            )
                            graph.nodes.append(squeeze_node)
                            squeeze_idx += 1
                            for node2 in node2s:
                                node2.inputs[0] = squeeze_link
            elif node.op == "BatchNormalization":
                if node.attrs.get("training_mode") == 1:
                    node.attrs["training_mode"] = 0
                    node.outputs = node.outputs[:1]

    graph.cleanup(remove_unused_node_outputs=True, recurse_subgraphs=True, recurse_functions=True)
    graph.toposort()
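For reference, a rough NumPy analogue (not from the commit; the array and the axis/index values are arbitrary) of the identity behind the Gather-to-Slice rewrite above: gathering a single index along an axis equals slicing [index, index + 1) on that axis and squeezing the resulting length-1 dimension.

import numpy as np

x = np.random.rand(2, 3, 4)
axis, index = 1, 2

gathered = np.take(x, index, axis=axis)    # Gather with a scalar index -> shape (2, 4)
sliced = np.take(x, [index], axis=axis)    # Slice index:index + 1      -> shape (2, 1, 4)
squeezed = np.squeeze(sliced, axis=axis)   # Squeeze the length-1 axis  -> shape (2, 4)

assert np.array_equal(gathered, squeezed)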
@@ -170,12 +225,19 @@ def onnx_make_fixed(input_path: str, output_path: str, input_shape: tuple[int, .
    simplified, success = onnxsim.simplify(input_path)
    if not success:
        raise RuntimeError(f"Failed to simplify {input_path}")
    onnx.save(simplified, output_path, save_as_external_data=True, all_tensors_to_one_file=False)
    try:
        onnx.save(simplified, output_path)
    except:
        onnx.save(simplified, output_path, save_as_external_data=True, all_tensors_to_one_file=False)
    infer_shapes_path(output_path, check_type=True, strict_mode=True, data_prop=True)
    model = onnx.load_model(output_path)
    make_input_shape_fixed(model.graph, model.graph.input[0].name, input_shape)
    fix_output_shapes(model)
    onnx.save(model, output_path, save_as_external_data=True, all_tensors_to_one_file=False)
    try:
        onnx.save(model, output_path)
    except:
        onnx.save(model, output_path, save_as_external_data=True, all_tensors_to_one_file=False)
    onnx.save(model, output_path)
    infer_shapes_path(output_path, check_type=True, strict_mode=True, data_prop=True)
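The try/except around onnx.save above appears to guard against models whose serialized graph exceeds the 2 GB protobuf limit; a standalone sketch of that fallback pattern (the helper name is illustrative, not from the commit):

import onnx

def save_onnx_with_fallback(model: onnx.ModelProto, output_path: str) -> None:
    # Try a plain single-file save first; if the model is too large to
    # serialize as one protobuf, write the weights out as external data files.
    try:
        onnx.save(model, output_path)
    except Exception:
        onnx.save(model, output_path, save_as_external_data=True, all_tensors_to_one_file=False)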
@@ -192,7 +254,6 @@ class ExportBase:
        super().__init__()
        self.name = name
        self.optimize = optimization_level
        self.nchw_transpose = False
        self.input_shape = input_shape
        self.pretrained = pretrained
        self.cache_dir = os.path.join(os.environ["CACHE_DIR"], self.model_name)
@@ -213,7 +274,7 @@ class ExportBase:
        if not os.path.isfile(static_path):
            print(f"Making {self.model_name} ({self.task}) static")
            onnx_make_fixed(onnx_path_original, static_path, self.input_shape)
            onnx_transpose_4d(static_path)
            make_onnx_armnn_compatible(static_path)
        static_model = onnx.load_model(static_path)
        self.inputs = [input_.name for input_ in static_model.graph.input]
        self.outputs = [output_.name for output_ in static_model.graph.output]
@@ -247,10 +308,10 @@ class ExportBase:
        armnn_fp16 = os.path.join(fp16_dir, "model.armnn")

        args = ["./armnnconverter", "-f", "tflite-binary"]
        for input_ in self.inputs:
            args.extend(["-i", input_])
        for output_ in self.outputs:
            args.extend(["-o", output_])
        args.append("-i")
        args.extend(self.inputs)
        args.append("-o")
        args.extend(self.outputs)

        fp32_args = args.copy()
        fp32_args.extend(["-m", tflite_fp32, "-p", armnn_fp32])
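For illustration, the new argument handling passes all tensor names after a single -i/-o flag instead of repeating the flag per name; a small sketch (tensor names and paths are made up) of the resulting command line:

inputs = ["input_tensor"]
outputs = ["output_tensor"]

args = ["./armnnconverter", "-f", "tflite-binary"]
args.append("-i")
args.extend(inputs)
args.append("-o")
args.extend(outputs)
args.extend(["-m", "model_fp32/model.tflite", "-p", "model_fp32/model.armnn"])

print(" ".join(args))
# ./armnnconverter -f tflite-binary -i input_tensor -o output_tensor -m model_fp32/model.tflite -p model_fp32/model.armnn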
@@ -320,32 +381,28 @@ def main() -> None:
    failed: list[Callable[[], ExportBase]] = [
        lambda: OpenClipVisual("ViT-H-14-378-quickgelu", (1, 3, 378, 378), pretrained="dfn5b"), # flatbuffers: cannot grow buffer beyond 2 gigabytes (will probably work with fp16)
        lambda: OpenClipVisual("ViT-H-14-quickgelu", (1, 3, 224, 224), pretrained="dfn5b"), # flatbuffers: cannot grow buffer beyond 2 gigabytes (will probably work with fp16)
        lambda: OpenClipTextual("nllb-clip-base-siglip", (1, 77), pretrained="v1"), # ERROR (tinynn.converter.base) Unsupported ops: aten::logical_not
        lambda: OpenClipTextual("nllb-clip-large-siglip", (1, 77), pretrained="v1"), # ERROR (tinynn.converter.base) Unsupported ops: aten::logical_not
        lambda: OpenClipVisual("ViT-L-14", (1, 3, 224, 224), pretrained="laion400m_e31"),
        lambda: OpenClipTextual("ViT-L-14", (1, 77), pretrained="laion400m_e31"),
        lambda: OpenClipVisual("ViT-L-14", (1, 3, 224, 224), pretrained="laion400m_e32"),
        lambda: OpenClipTextual("ViT-L-14", (1, 77), pretrained="laion400m_e32"),
        lambda: OpenClipVisual("ViT-L-14", (1, 3, 224, 224), pretrained="laion2b-s32b-b82k"),
        lambda: OpenClipTextual("ViT-L-14", (1, 77), pretrained="laion2b-s32b-b82k"),
        lambda: OpenClipVisual("ViT-H-14", (1, 3, 224, 224), pretrained="laion2b-s32b-b79k"),
        lambda: OpenClipTextual("ViT-H-14", (1, 77), pretrained="laion2b-s32b-b79k"),
        lambda: OpenClipVisual("ViT-g-14", (1, 3, 224, 224), pretrained="laion2b-s12b-b42k"),
        lambda: OpenClipTextual("ViT-g-14", (1, 77), pretrained="laion2b-s12b-b42k"),
        lambda: OpenClipVisual("XLM-Roberta-Large-Vit-B-16Plus", (1, 3, 240, 240)),
        lambda: OpenClipVisual("XLM-Roberta-Large-ViT-H-14", (1, 3, 224, 224), pretrained="frozen_laion5b_s13b_b90k"),
        lambda: OpenClipVisual("nllb-clip-base-siglip", (1, 3, 384, 384), pretrained="v1"),
        lambda: OpenClipVisual("nllb-clip-large-siglip", (1, 3, 384, 384), pretrained="v1"),
        lambda: OpenClipVisual("RN50", (1, 3, 224, 224), pretrained="yfcc15m"), # BatchNorm operation with mean/var output is not implemented
        lambda: OpenClipTextual("RN50", (1, 77), pretrained="yfcc15m"), # BatchNorm operation with mean/var output is not implemented
        lambda: OpenClipVisual("RN50", (1, 3, 224, 224), pretrained="cc12m"), # BatchNorm operation with mean/var output is not implemented
        lambda: OpenClipTextual("RN50", (1, 77), pretrained="cc12m"), # BatchNorm operation with mean/var output is not implemented
        lambda: MClipTextual("XLM-Roberta-Large-Vit-L-14", (1, 77)), # Expected normalized_shape to be at least 1-dimensional, i.e., containing at least one element, but got normalized_shape = []
        lambda: MClipTextual("XLM-Roberta-Large-Vit-B-16Plus", (1, 77)), # Expected normalized_shape to be at least 1-dimensional, i.e., containing at least one element, but got normalized_shape = []
        lambda: MClipTextual("LABSE-Vit-L-14", (1, 77)), # Expected normalized_shape to be at least 1-dimensional, i.e., containing at least one element, but got normalized_shape = []
        lambda: OpenClipTextual("XLM-Roberta-Large-ViT-H-14", (1, 77), pretrained="frozen_laion5b_s13b_b90k"), # Expected normalized_shape to be at least 1-dimensional, i.e., containing at least one element, but got normalized_shape = []
    ]

    oom = [
        lambda: OpenClipVisual("nllb-clip-base-siglip", (1, 3, 384, 384), pretrained="v1"),
        lambda: OpenClipTextual("nllb-clip-base-siglip", (1, 77), pretrained="v1"),
        lambda: OpenClipVisual("nllb-clip-large-siglip", (1, 3, 384, 384), pretrained="v1"),
        lambda: OpenClipTextual("nllb-clip-large-siglip", (1, 77), pretrained="v1"), # ERROR (tinynn.converter.base) Unsupported ops: aten::logical_not
        # lambda: OpenClipTextual("ViT-H-14-quickgelu", (1, 77), pretrained="dfn5b"),
        # lambda: OpenClipTextual("ViT-H-14-378-quickgelu", (1, 77), pretrained="dfn5b"),
        # lambda: OpenClipVisual("XLM-Roberta-Large-Vit-L-14", (1, 3, 224, 224)),
    ]

    succeeded: list[Callable[[], ExportBase]] = [
        # lambda: OpenClipVisual("ViT-B-32", (1, 3, 224, 224), pretrained="laion2b_e16"),
        # lambda: OpenClipTextual("ViT-B-32", (1, 77), pretrained="laion2b_e16"),
@@ -363,18 +420,25 @@ def main() -> None:
        # lambda: OpenClipTextual("ViT-B-16-plus-240", (1, 77), pretrained="laion400m_e31"),
        # lambda: OpenClipVisual("ViT-B-32", (1, 3, 224, 224), pretrained="openai"),
        # lambda: OpenClipTextual("ViT-B-32", (1, 77), pretrained="openai"),
        lambda: OpenClipVisual("ViT-B-16", (1, 3, 224, 224), pretrained="openai"),
        lambda: OpenClipTextual("ViT-B-16", (1, 77), pretrained="openai"),
        # lambda: OpenClipVisual("ViT-L-14", (1, 3, 224, 224), pretrained="openai"),
        # lambda: OpenClipTextual("ViT-L-14", (1, 77), pretrained="openai"),
        # lambda: OpenClipVisual("ViT-L-14-336", (1, 3, 336, 336), pretrained="openai"),
        # lambda: OpenClipTextual("ViT-L-14-336", (1, 77), pretrained="openai"),
        # lambda: OpenClipVisual("ViT-B-16", (1, 3, 224, 224), pretrained="openai"),
        # lambda: OpenClipTextual("ViT-B-16", (1, 77), pretrained="openai"),
        # lambda: OpenClipVisual("RN50", (1, 3, 224, 224), pretrained="openai"),
        # lambda: OpenClipTextual("RN50", (1, 77), pretrained="openai"),
        # lambda: OpenClipTextual("ViT-H-14-quickgelu", (1, 77), pretrained="dfn5b"),
        # lambda: OpenClipTextual("ViT-H-14-378-quickgelu", (1, 77), pretrained="dfn5b"),
        # lambda: OpenClipVisual("XLM-Roberta-Large-Vit-L-14", (1, 3, 224, 224)),
        # lambda: OpenClipVisual("RN50", (1, 3, 224, 224), pretrained="yfcc15m"),
        # lambda: OpenClipTextual("RN50", (1, 77), pretrained="yfcc15m"),
        # lambda: OpenClipVisual("RN50", (1, 3, 224, 224), pretrained="cc12m"),
        # lambda: OpenClipTextual("RN50", (1, 77), pretrained="cc12m"),
        # lambda: OpenClipVisual("XLM-Roberta-Large-Vit-B-32", (1, 3, 224, 224)),
        # lambda: OpenClipVisual("ViT-L-14", (1, 3, 224, 224), pretrained="openai"),
        # lambda: OpenClipTextual("ViT-L-14", (1, 77), pretrained="openai"),
        lambda: OpenClipVisual("ViT-L-14", (1, 3, 224, 224), pretrained="laion400m_e31"),
        lambda: OpenClipTextual("ViT-L-14", (1, 77), pretrained="laion400m_e31"),
        lambda: OpenClipVisual("ViT-L-14", (1, 3, 224, 224), pretrained="laion400m_e32"),
        lambda: OpenClipTextual("ViT-L-14", (1, 77), pretrained="laion400m_e32"),
        lambda: OpenClipVisual("ViT-L-14", (1, 3, 224, 224), pretrained="laion2b-s32b-b82k"),
        lambda: OpenClipTextual("ViT-L-14", (1, 77), pretrained="laion2b-s32b-b82k"),
        # lambda: OpenClipVisual("ViT-L-14-336", (1, 3, 336, 336), pretrained="openai"),
        # lambda: OpenClipTextual("ViT-L-14-336", (1, 77), pretrained="openai"),
        # lambda: ArcFace("buffalo_s", (1, 3, 112, 112), optimization_level=3),
        # lambda: RetinaFace("buffalo_s", (1, 3, 640, 640), optimization_level=3),
        # lambda: ArcFace("buffalo_m", (1, 3, 112, 112), optimization_level=3),