# This currently implements basic LLMesh -> Collada.
#
# TODO:
# * inverse, Collada -> LLMesh (for simple cases, maybe using impasse rather than pycollada)
# * round-tripping tests, LLMesh->Collada->LLMesh
# * * Can't really test using Collada->LLMesh->Collada because Collada->LLMesh is almost always
# going to be lossy due to how SL represents vertex data and materials compared to what
# Collada allows.
# * Eventually scrap this and just use GLTF instead once we know we have the semantics correct
# * * Collada was just easier to bootstrap given that it's the only officially supported input format
# * * Collada tooling sucks and even LL is moving away from it
# * * Ensuring LLMesh->Collada and LLMesh->GLTF conversion don't differ semantically is easy via assimp.
import logging
import os.path
import secrets
import sys
from typing import Dict, List, Optional, Union, Sequence
import collada
import collada.source
from collada import E
from lxml import etree
import numpy as np
import transformations
from hippolyzer.lib.base.datatypes import Vector3
from hippolyzer.lib.base.helpers import get_resource_filename
from hippolyzer.lib.base.serialization import BufferReader
from hippolyzer.lib.base.mesh import LLMeshSerializer, MeshAsset, positions_from_domain, SkinSegmentDict

LOG = logging.getLogger(__name__)
DIR = os.path.dirname(os.path.realpath(__file__))


def llsd_to_mat4(mat: Union[np.ndarray, Sequence[float]]) -> np.ndarray:
    return np.array(mat).reshape((4, 4), order='F')


def mat4_to_llsd(mat: np.ndarray) -> List[float]:
    return list(mat.flatten(order='F'))


def mat4_to_collada(mat: np.ndarray) -> np.ndarray:
    return mat.flatten(order='C')
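

# Illustrative sketch (not called anywhere in this module): how the three
# matrix helpers relate. The flat LLSD layout keeps the translation in its
# last four elements, while the flattened Collada layout puts it at indices
# 3, 7 and 11.
def _matrix_layout_example() -> None:
    llsd_mat = [1.0, 0.0, 0.0, 0.0,
                0.0, 1.0, 0.0, 0.0,
                0.0, 0.0, 1.0, 0.0,
                5.0, 6.0, 7.0, 1.0]  # translation <5, 6, 7>
    mat = llsd_to_mat4(llsd_mat)
    # The translation ends up in the last column of the 4x4
    assert list(mat[:3, 3]) == [5.0, 6.0, 7.0]
    # and round-trips losslessly back to the LLSD layout
    assert mat4_to_llsd(mat) == llsd_mat
    # Collada serialization is row-major, so the translation lands at 3, 7, 11
    assert list(mat4_to_collada(mat)[[3, 7, 11]]) == [5.0, 6.0, 7.0]
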
def mesh_to_collada(ll_mesh: MeshAsset, include_skin=True) -> collada.Collada:
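    """Convert a parsed LLMesh asset into a single-scene, Z-up Collada document."""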
dae = collada.Collada()
    # SL is Z-up, so mark the exported document accordingly
    dae.assetInfo.upaxis = collada.asset.UP_AXIS.Z_UP
scene = collada.scene.Scene("scene", [llmesh_to_node(ll_mesh, dae, include_skin=include_skin)])
dae.scenes.append(scene)
dae.scene = scene
    return dae


def llmesh_to_node(ll_mesh: MeshAsset, dae: collada.Collada, uniq=None,
include_skin=True, node_transform: Optional[np.ndarray] = None) -> collada.scene.Node:
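    """
    Convert the high LOD of an LLMesh asset into a Collada scene node.

    Each SL material face becomes its own geometry, and skinned meshes get a
    skin controller per face plus a copy of the reference skeleton.
    """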
if node_transform is None:
node_transform = np.identity(4)
should_skin = False
skin_seg = ll_mesh.segments.get('skin')
bind_shape_matrix = None
if include_skin and skin_seg:
bind_shape_matrix = llsd_to_mat4(skin_seg["bind_shape_matrix"])
should_skin = True
# Transform from the skin will be applied on the controller, not the node
node_transform = np.identity(4)
if not uniq:
uniq = secrets.token_urlsafe(4)
geom_nodes = []
node_name = f"mainnode{uniq}"
# TODO: do the other LODs?
for submesh_num, submesh in enumerate(ll_mesh.segments["high_lod"]):
# Make sure none of our IDs collide with those of other nodes
sub_uniq = uniq + str(submesh_num)
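        # LLMesh stores vertex data quantized; positions_from_domain rescales
        # the raw values into the submesh's stated domain (its bounding range).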
range_xyz = positions_from_domain(submesh["Position"], submesh["PositionDomain"])
xyz = np.array([x.data() for x in range_xyz])
range_uv = positions_from_domain(submesh['TexCoord0'], submesh['TexCoord0Domain'])
uv = np.array([x.data() for x in range_uv]).flatten()
norms = np.array([x.data() for x in submesh["Normal"]])
effect = collada.material.Effect(
id=f"effect{sub_uniq}",
params=[],
specular=(0.0, 0.0, 0.0, 0.0),
reflectivity=(0.0, 0.0, 0.0, 0.0),
emission=(0.0, 0.0, 0.0, 0.0),
ambient=(0.0, 0.0, 0.0, 0.0),
reflective=0.0,
shadingtype="blinn",
shininess=0.0,
diffuse=(0.0, 0.0, 0.0),
)
mat = collada.material.Material(f"material{sub_uniq}", f"material{sub_uniq}", effect)
dae.materials.append(mat)
dae.effects.append(effect)
vert_src = collada.source.FloatSource(f"verts-array{sub_uniq}", xyz.flatten(), ("X", "Y", "Z"))
norm_src = collada.source.FloatSource(f"norms-array{sub_uniq}", norms.flatten(), ("X", "Y", "Z"))
# UV maps have to have the same name or they'll behave weirdly when objects are merged.
uv_src = collada.source.FloatSource("uvs-array", np.array(uv), ("U", "V"))
geom = collada.geometry.Geometry(dae, f"geometry{sub_uniq}", "geometry", [vert_src, norm_src, uv_src])
input_list = collada.source.InputList()
input_list.addInput(0, 'VERTEX', f'#verts-array{sub_uniq}', set="0")
input_list.addInput(0, 'NORMAL', f'#norms-array{sub_uniq}', set="0")
input_list.addInput(0, 'TEXCOORD', '#uvs-array', set="0")
tri_idxs = np.array(submesh["TriangleList"]).flatten()
matnode = collada.scene.MaterialNode(f"materialref{sub_uniq}", mat, inputs=[])
tri_set = geom.createTriangleSet(tri_idxs, input_list, f'materialref{sub_uniq}')
geom.primitives.append(tri_set)
dae.geometries.append(geom)
if should_skin:
joint_names = np.array(skin_seg['joint_names'], dtype=object)
joints_source = collada.source.NameSource(f"joint-names{sub_uniq}", joint_names, ("JOINT",))
# PyCollada has a bug where it doesn't set the source URI correctly. Fix it.
accessor = joints_source.xmlnode.find(f"{dae.tag('technique_common')}/{dae.tag('accessor')}")
if not accessor.get('source').startswith('#'):
accessor.set('source', f"#{accessor.get('source')}")
flattened_bind_poses = []
for bind_pose in skin_seg['inverse_bind_matrix']:
flattened_bind_poses.append(mat4_to_collada(llsd_to_mat4(bind_pose)))
flattened_bind_poses = np.array(flattened_bind_poses)
inv_bind_source = _create_mat4_source(f"bind-poses{sub_uniq}", flattened_bind_poses, "TRANSFORM")
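            # Collada's <v> array interleaves (joint index, weight index) pairs
            # per influence, with <vcount> giving the influence count per vertex.
            # LLMesh weights are listed per-vertex, so the weight index just
            # counts up monotonically.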
weight_joint_idxs = []
weights = []
vert_weight_counts = []
cur_weight_idx = 0
for vert_weights in submesh['Weights']:
vert_weight_counts.append(len(vert_weights))
for vert_weight in vert_weights:
weights.append(vert_weight.weight)
weight_joint_idxs.append(vert_weight.joint_idx)
weight_joint_idxs.append(cur_weight_idx)
cur_weight_idx += 1
weights_source = collada.source.FloatSource(f"skin-weights{sub_uniq}", np.array(weights), ("WEIGHT",))
# We need to make a controller for each material since materials are essentially distinct meshes
# in SL, with their own distinct sets of weights and vertex data.
controller_node = E.controller(
E.skin(
E.bind_shape_matrix(' '.join(str(x) for x in mat4_to_collada(bind_shape_matrix))),
joints_source.xmlnode,
inv_bind_source.xmlnode,
weights_source.xmlnode,
E.joints(
E.input(semantic="JOINT", source=f"#joint-names{sub_uniq}"),
E.input(semantic="INV_BIND_MATRIX", source=f"#bind-poses{sub_uniq}")
),
E.vertex_weights(
E.input(semantic="JOINT", source=f"#joint-names{sub_uniq}", offset="0"),
E.input(semantic="WEIGHT", source=f"#skin-weights{sub_uniq}", offset="1"),
E.vcount(' '.join(str(x) for x in vert_weight_counts)),
E.v(' '.join(str(x) for x in weight_joint_idxs)),
count=str(len(submesh['Weights']))
),
source=f"#geometry{sub_uniq}"
),
id=f"Armature-{sub_uniq}",
name=node_name
)
controller = collada.controller.Controller.load(dae, {}, controller_node)
dae.controllers.append(controller)
geom_node = collada.scene.ControllerNode(controller, [matnode])
else:
geom_node = collada.scene.GeometryNode(geom, [matnode])
geom_nodes.append(geom_node)
node = collada.scene.Node(
node_name,
children=geom_nodes,
transforms=[collada.scene.MatrixTransform(mat4_to_collada(node_transform))],
)
if should_skin:
# We need a skeleton per _mesh asset_ because you could have incongruous skeletons
# within the same linkset.
# TODO: can we maintain some kind of skeleton cache, where if this skeleton has no conflicts
# with another skeleton in the cache, we just use that skeleton and add any additional joints?
skel_root = load_skeleton_nodes()
transform_skeleton(skel_root, dae, skin_seg)
skel = collada.scene.Node.load(dae, skel_root, {})
skel.children.append(node)
skel.id = f"Skel-{uniq}"
skel.save()
node = skel
return node
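

# A minimal sketch (not used elsewhere in this module): combining several mesh
# assets into one scene, mirroring mesh_to_collada() above. Distinct `uniq`
# values keep the generated IDs from colliding between the nodes.
def _linkset_to_collada_sketch(ll_meshes: Sequence[MeshAsset]) -> collada.Collada:
    dae = collada.Collada()
    dae.assetInfo.upaxis = collada.asset.UP_AXIS.Z_UP
    nodes = [llmesh_to_node(mesh, dae, uniq=f"mesh{num}x") for num, mesh in enumerate(ll_meshes)]
    scene = collada.scene.Scene("scene", nodes)
    dae.scenes.append(scene)
    dae.scene = scene
    return dae
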
def load_skeleton_nodes() -> etree.ElementBase:
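    """Load the reference Collada joint hierarchy bundled with hippolyzer"""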
# TODO: this sucks. Can't we construct nodes with the appropriate transformation
# matrices from the data in `avatar_skeleton.xml`?
skel_path = get_resource_filename("lib/base/data/male_collada_joints.xml")
with open(skel_path, 'r') as f:
        return etree.fromstring(f.read())


def transform_skeleton(skel_root: etree.ElementBase, dae: collada.Collada, skin_seg: SkinSegmentDict,
include_unreferenced_bones=False):
"""Update skeleton XML nodes to account for joint translations in the mesh"""
joint_nodes: Dict[str, collada.scene.Node] = {}
for skel_node in skel_root.iter():
# xpath is loathsome so this is easier.
if skel_node.tag != dae.tag('node') or skel_node.get('type') != 'JOINT':
continue
joint_nodes[skel_node.get('name')] = collada.scene.Node.load(dae, skel_node, {})
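    # Joints with an alt_inverse_bind_matrix entry (SL's "joint positions")
    # have their rest transforms overridden by the mesh; only the translation
    # component is applied here.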
for joint_name, matrix in zip(skin_seg['joint_names'], skin_seg.get('alt_inverse_bind_matrix', [])):
joint_node = joint_nodes[joint_name]
joint_decomp = transformations.decompose_matrix(llsd_to_mat4(matrix))
joint_node.matrix = mat4_to_collada(transformations.compose_matrix(translate=joint_decomp[3]))
# Update the underlying XML element with the new transform matrix
joint_node.save()
    if not include_unreferenced_bones:
        needed_hierarchy = set()
        for skel_node in joint_nodes.values():
            skel_node = skel_node.xmlnode
            if skel_node.get('name') in skin_seg['joint_names']:
                # Add this joint and all of its ancestors to the set of needed joints
                while skel_node is not None:
                    needed_hierarchy.add(skel_node.get('name'))
                    skel_node = skel_node.getparent()
        for skel_node in joint_nodes.values():
            skel_node = skel_node.xmlnode
            if skel_node.get('name') not in needed_hierarchy:
                skel_node.getparent().remove(skel_node)
pelvis_offset = skin_seg.get('pelvis_offset')
# TODO: should we even do this here? It's not present in the collada, just
# something that's specified in the uploader before conversion to LLMesh.
if pelvis_offset and 'mPelvis' in joint_nodes:
pelvis_node = joint_nodes['mPelvis']
# Column-major!
pelvis_node.matrix[3][2] += pelvis_offset
        pelvis_node.save()


def _create_mat4_source(name: str, data: np.ndarray, semantic: str):
    # PyCollada has no way to make a source with a float4x4 semantic. Do it a bad way.
    # Note that Collada serializes matrices row-major (translation at indices
    # 3, 7, 11) whereas LLSD mesh stores them column-major (translation at
    # indices 12, 13, 14)!
source = collada.source.FloatSource(name, data, tuple(f"M{x}" for x in range(16)))
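    # FloatSource wrote sixteen scalar <param>s ("M0".."M15") under the
    # accessor; swap them for the single float4x4 <param> skin consumers expect.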
accessor = source.xmlnode[1][0]
for child in list(accessor):
accessor.remove(child)
accessor.append(E.param(name=semantic, type="float4x4"))
    return source


def fix_weird_bind_matrices(skin_seg: SkinSegmentDict):
"""
Fix weird-looking bind matrices to have normal scaling and rotations
Not sure why these even happen (weird mesh authoring programs?)
Sometimes get enormous inverse bind matrices (each component 10k+) and tiny
bind shape matrix components. This detects inverse bind shape matrices
with weird scales and tries to set them to what they "should" be without
the weird inverted scaling.
"""
scale_fixup = Vector3(1, 1, 1)
angle_fixup = Vector3(0, 0, 0)
have_fixups = False
# Totally non-scientific method of detecting odd bind matrices based on squinting very,
# very hard at a random sample of assets.
for joint_name, joint_inv in zip(skin_seg['joint_names'], skin_seg['inverse_bind_matrix']):
if not joint_name.startswith("m"):
# We can't make very good guesses based on collision volume scales and rotations,
# skip anything but the "m" joints.
continue
joint_mat = llsd_to_mat4(joint_inv)
joint_scale, _, joint_angle, _, _ = transformations.decompose_matrix(joint_mat)
# If the scale component of an mJointName joint isn't roughly <1,1,1>, we likely have
# scaling applied to the inverse bind matrices rather than the bind matrix. Figure out
# what the fixup should be so that we can reverse it.
if abs(3.0 - sum(joint_scale)) > 0.5:
scale_fixup = Vector3(1, 1, 1) / Vector3(*joint_scale)
have_fixups = True
# I wouldn't expect mJointName joints to be rotated at all in their inverse bind matrices.
# Is this a rotation that should've been applied to the bind shape matrix instead?
# In any event, all joints are likely rotated by this amount, so calculate the inverse.
if abs(sum(joint_angle)) > 0.05:
angle_fixup = -Vector3(*joint_angle)
have_fixups = True
if have_fixups:
        LOG.warning("Detected weird matrices in mesh! scale fixup: %s, angle fixup: %s",
                    scale_fixup, angle_fixup)
        # The magnitude of the scales in the inverse bind matrices looks very
        # strange, and the bind shape matrix is probably messed up as well.
        # TODO: actually apply the fixups here; the previous logic was totally
        #  wrong-headed. DON'T MESS WITH INVERSE TRANSLATION!!!! Only bind
        #  shape gets its translation scaled.


def main():
# Take an llmesh file as an argument and spit out basename-converted.dae
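    # e.g. `python colladatools.py foo.llmesh` writes foo-converted.dae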
    with open(sys.argv[1], "rb") as f:
        reader = BufferReader("<", f.read())
    dae = mesh_to_collada(reader.read(LLMeshSerializer(parse_segment_contents=True)))
    dae.write(sys.argv[1].rsplit(".", 1)[0] + "-converted.dae")


if __name__ == "__main__":
main()