Compare commits
34 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2b6d8a70f4 | ||
|
|
1a308e9671 | ||
|
|
7b21e5634c | ||
|
|
e4548a285d | ||
|
|
72e926f04c | ||
|
|
d9fa14b17c | ||
|
|
33c5abaaf4 | ||
|
|
2dfd61fcc5 | ||
|
|
eb58e747ce | ||
|
|
1d221a2289 | ||
|
|
2ffd0458d0 | ||
|
|
25f533a31b | ||
|
|
570dbce181 | ||
|
|
ccb63e971b | ||
|
|
8be4bce8bc | ||
|
|
e945706d2b | ||
|
|
6c748a6ab2 | ||
|
|
6abc7ca7d2 | ||
|
|
c57e0e467c | ||
|
|
e46b4adad2 | ||
|
|
5ef9b5354a | ||
|
|
34ca7d54be | ||
|
|
cb316f1992 | ||
|
|
da05a6cf1f | ||
|
|
f06c31e225 | ||
|
|
b4e5596ca2 | ||
|
|
49a54ce099 | ||
|
|
0349fd9078 | ||
|
|
118ef2813a | ||
|
|
256f74b71a | ||
|
|
4a84453ca4 | ||
|
|
34316cb166 | ||
|
|
0f7d35cdca | ||
|
|
2ee8a6f008 |
@@ -20,6 +20,7 @@ bulk upload, like changing priority or removing a joint.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import pathlib
|
||||
from abc import abstractmethod
|
||||
from typing import *
|
||||
@@ -106,7 +107,10 @@ class LocalAnimAddon(BaseAddon):
|
||||
if not anim_id:
|
||||
continue
|
||||
# is playing right now, check if there's a newer version
|
||||
self.apply_local_anim_from_file(session, region, anim_name, only_if_changed=True)
|
||||
try:
|
||||
self.apply_local_anim_from_file(session, region, anim_name, only_if_changed=True)
|
||||
except Exception:
|
||||
logging.exception("Exploded while replaying animation")
|
||||
await asyncio.sleep(1.0)
|
||||
|
||||
def handle_rlv_command(self, session: Session, region: ProxiedRegion, source: UUID,
|
||||
@@ -175,7 +179,6 @@ class LocalAnimAddon(BaseAddon):
|
||||
if only_if_changed and old_mtime == mtime:
|
||||
return
|
||||
|
||||
cls.local_anim_mtimes[anim_name] = mtime
|
||||
# file might not even exist anymore if mtime is `None`,
|
||||
# anim will automatically stop if that happens.
|
||||
if mtime:
|
||||
@@ -187,6 +190,7 @@ class LocalAnimAddon(BaseAddon):
|
||||
with open(anim_path, "rb") as f:
|
||||
anim_data = f.read()
|
||||
anim_data = cls._mangle_anim(anim_data)
|
||||
cls.local_anim_mtimes[anim_name] = mtime
|
||||
else:
|
||||
print(f"Unknown anim {anim_name!r}")
|
||||
cls.apply_local_anim(session, region, anim_name, new_data=anim_data)
|
||||
|
||||
@@ -7,6 +7,8 @@ in the appropriate format.
|
||||
from pathlib import Path
|
||||
from typing import *
|
||||
|
||||
from hippolyzer.lib.base.mesh import LLMeshSerializer
|
||||
from hippolyzer.lib.base.serialization import BufferReader
|
||||
from hippolyzer.lib.base.templates import AssetType
|
||||
from hippolyzer.lib.proxy.addons import AddonManager
|
||||
from hippolyzer.lib.proxy.addon_utils import show_message, BaseAddon
|
||||
@@ -38,8 +40,11 @@ class UploaderAddon(BaseAddon):
|
||||
try:
|
||||
if asset_type == AssetType.MESH:
|
||||
# Kicking off a mesh upload works a little differently internally
|
||||
# Half-parse the mesh so that we can figure out how many faces it has
|
||||
reader = BufferReader("!", file_body)
|
||||
mesh = reader.read(LLMeshSerializer(parse_segment_contents=False))
|
||||
upload_token = await region.asset_uploader.initiate_mesh_upload(
|
||||
name, file_body, flags=flags
|
||||
name, mesh, flags=flags
|
||||
)
|
||||
else:
|
||||
upload_token = await region.asset_uploader.initiate_asset_upload(
|
||||
|
||||
@@ -11,12 +11,11 @@
|
||||
# * * Collada tooling sucks and even LL is moving away from it
|
||||
# * * Ensuring LLMesh->Collada and LLMesh->GLTF conversion don't differ semantically is easy via assimp.
|
||||
|
||||
import collections
|
||||
import logging
|
||||
import os.path
|
||||
import secrets
|
||||
import statistics
|
||||
import sys
|
||||
from typing import Dict, List, Iterable, Optional
|
||||
from typing import Dict, Optional
|
||||
|
||||
import collada
|
||||
import collada.source
|
||||
@@ -27,11 +26,22 @@ import transformations
|
||||
|
||||
from hippolyzer.lib.base.helpers import get_resource_filename
|
||||
from hippolyzer.lib.base.serialization import BufferReader
|
||||
from hippolyzer.lib.base.mesh import LLMeshSerializer, MeshAsset, positions_from_domain, SkinSegmentDict
|
||||
from hippolyzer.lib.base.mesh import (
|
||||
LLMeshSerializer,
|
||||
MeshAsset,
|
||||
positions_from_domain,
|
||||
SkinSegmentDict,
|
||||
llsd_to_mat4,
|
||||
)
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
DIR = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
|
||||
def mat4_to_collada(mat: np.ndarray) -> np.ndarray:
|
||||
return mat.flatten(order='C')
|
||||
|
||||
|
||||
def mesh_to_collada(ll_mesh: MeshAsset, include_skin=True) -> collada.Collada:
|
||||
dae = collada.Collada()
|
||||
axis = collada.asset.UP_AXIS.Z_UP
|
||||
@@ -52,7 +62,7 @@ def llmesh_to_node(ll_mesh: MeshAsset, dae: collada.Collada, uniq=None,
|
||||
skin_seg = ll_mesh.segments.get('skin')
|
||||
bind_shape_matrix = None
|
||||
if include_skin and skin_seg:
|
||||
bind_shape_matrix = np.array(skin_seg["bind_shape_matrix"]).reshape((4, 4))
|
||||
bind_shape_matrix = llsd_to_mat4(skin_seg["bind_shape_matrix"])
|
||||
should_skin = True
|
||||
# Transform from the skin will be applied on the controller, not the node
|
||||
node_transform = np.identity(4)
|
||||
@@ -85,7 +95,7 @@ def llmesh_to_node(ll_mesh: MeshAsset, dae: collada.Collada, uniq=None,
|
||||
reflective=0.0,
|
||||
shadingtype="blinn",
|
||||
shininess=0.0,
|
||||
diffuse=(0.0, 0.0, 0.0),
|
||||
diffuse=(1.0, 1.0, 1.0),
|
||||
)
|
||||
mat = collada.material.Material(f"material{sub_uniq}", f"material{sub_uniq}", effect)
|
||||
|
||||
@@ -119,9 +129,8 @@ def llmesh_to_node(ll_mesh: MeshAsset, dae: collada.Collada, uniq=None,
|
||||
accessor.set('source', f"#{accessor.get('source')}")
|
||||
|
||||
flattened_bind_poses = []
|
||||
# LLMesh matrices are row-major, convert to col-major for Collada.
|
||||
for bind_pose in skin_seg['inverse_bind_matrix']:
|
||||
flattened_bind_poses.append(np.array(bind_pose).reshape((4, 4)).flatten('F'))
|
||||
flattened_bind_poses.append(mat4_to_collada(llsd_to_mat4(bind_pose)))
|
||||
flattened_bind_poses = np.array(flattened_bind_poses)
|
||||
inv_bind_source = _create_mat4_source(f"bind-poses{sub_uniq}", flattened_bind_poses, "TRANSFORM")
|
||||
|
||||
@@ -142,7 +151,7 @@ def llmesh_to_node(ll_mesh: MeshAsset, dae: collada.Collada, uniq=None,
|
||||
# in SL, with their own distinct sets of weights and vertex data.
|
||||
controller_node = E.controller(
|
||||
E.skin(
|
||||
E.bind_shape_matrix(' '.join(str(x) for x in bind_shape_matrix.flatten('F'))),
|
||||
E.bind_shape_matrix(' '.join(str(x) for x in mat4_to_collada(bind_shape_matrix))),
|
||||
joints_source.xmlnode,
|
||||
inv_bind_source.xmlnode,
|
||||
weights_source.xmlnode,
|
||||
@@ -173,11 +182,13 @@ def llmesh_to_node(ll_mesh: MeshAsset, dae: collada.Collada, uniq=None,
|
||||
node = collada.scene.Node(
|
||||
node_name,
|
||||
children=geom_nodes,
|
||||
transforms=[collada.scene.MatrixTransform(np.array(node_transform.flatten('F')))],
|
||||
transforms=[collada.scene.MatrixTransform(mat4_to_collada(node_transform))],
|
||||
)
|
||||
if should_skin:
|
||||
# We need a skeleton per _mesh asset_ because you could have incongruous skeletons
|
||||
# within the same linkset.
|
||||
# TODO: can we maintain some kind of skeleton cache, where if this skeleton has no conflicts
|
||||
# with another skeleton in the cache, we just use that skeleton and add any additional joints?
|
||||
skel_root = load_skeleton_nodes()
|
||||
transform_skeleton(skel_root, dae, skin_seg)
|
||||
skel = collada.scene.Node.load(dae, skel_root, {})
|
||||
@@ -199,7 +210,6 @@ def load_skeleton_nodes() -> etree.ElementBase:
|
||||
def transform_skeleton(skel_root: etree.ElementBase, dae: collada.Collada, skin_seg: SkinSegmentDict,
|
||||
include_unreferenced_bones=False):
|
||||
"""Update skeleton XML nodes to account for joint translations in the mesh"""
|
||||
# TODO: Use translation component only.
|
||||
joint_nodes: Dict[str, collada.scene.Node] = {}
|
||||
for skel_node in skel_root.iter():
|
||||
# xpath is loathsome so this is easier.
|
||||
@@ -208,7 +218,8 @@ def transform_skeleton(skel_root: etree.ElementBase, dae: collada.Collada, skin_
|
||||
joint_nodes[skel_node.get('name')] = collada.scene.Node.load(dae, skel_node, {})
|
||||
for joint_name, matrix in zip(skin_seg['joint_names'], skin_seg.get('alt_inverse_bind_matrix', [])):
|
||||
joint_node = joint_nodes[joint_name]
|
||||
joint_node.matrix = np.array(matrix).reshape((4, 4)).flatten('F')
|
||||
joint_decomp = transformations.decompose_matrix(llsd_to_mat4(matrix))
|
||||
joint_node.matrix = mat4_to_collada(transformations.compose_matrix(translate=joint_decomp[3]))
|
||||
# Update the underlying XML element with the new transform matrix
|
||||
joint_node.save()
|
||||
|
||||
@@ -249,48 +260,61 @@ def _create_mat4_source(name: str, data: np.ndarray, semantic: str):
|
||||
return source
|
||||
|
||||
|
||||
def fix_weird_bind_matrices(skin_seg: SkinSegmentDict):
|
||||
def fix_weird_bind_matrices(skin_seg: SkinSegmentDict) -> None:
|
||||
"""
|
||||
Fix weird-looking bind matrices to have normal scaling
|
||||
Fix weird-looking bind matrices to have sensible scaling and rotations
|
||||
|
||||
Not sure why these even happen (weird mesh authoring programs?)
|
||||
Sometimes get enormous inverse bind matrices (each component 10k+) and tiny
|
||||
Sometimes we get enormous inverse bind matrices (each component 10k+) and tiny
|
||||
bind shape matrix components. This detects inverse bind shape matrices
|
||||
with weird scales and tries to set them to what they "should" be without
|
||||
the weird inverted scaling.
|
||||
"""
|
||||
axis_counters = [collections.Counter() for _ in range(3)]
|
||||
for joint_inv in skin_seg['inverse_bind_matrix']:
|
||||
joint_mat = np.array(joint_inv).reshape((4, 4))
|
||||
joint_scale = transformations.decompose_matrix(joint_mat)[0]
|
||||
for axis_counter, axis_val in zip(axis_counters, joint_scale):
|
||||
axis_counter[axis_val] += 1
|
||||
most_common_inv_scale = []
|
||||
for axis_counter in axis_counters:
|
||||
most_common_inv_scale.append(axis_counter.most_common(1)[0][0])
|
||||
|
||||
if abs(1.0 - statistics.fmean(most_common_inv_scale)) > 1.0:
|
||||
# The magnitude of the scales in the inverse bind matrices look very strange.
|
||||
# The bind matrix itself is probably messed up as well, try to fix it.
|
||||
skin_seg['bind_shape_matrix'] = fix_llsd_matrix_scale(skin_seg['bind_shape_matrix'], most_common_inv_scale)
|
||||
if joint_positions := skin_seg.get('alt_inverse_bind_matrix', None):
|
||||
fix_matrix_list_scale(joint_positions, most_common_inv_scale)
|
||||
rev_scale = tuple(1.0 / x for x in most_common_inv_scale)
|
||||
fix_matrix_list_scale(skin_seg['inverse_bind_matrix'], rev_scale)
|
||||
# Sometimes we get mesh assets that have the vertex data naturally in y-up orientation,
|
||||
# and get re-oriented to z-up not through the bind shape matrix, but through the
|
||||
# transforms in the inverse bind matrices!
|
||||
#
|
||||
# Blender, for one, does not like this very much, and generally won't generate mesh
|
||||
# assets like this, as explained here https://developer.blender.org/T38660.
|
||||
# In vanilla Blender, these mesh assets will show up scaled and rotated _only_ according
|
||||
# to the bind shape matrix, which may end up with the model 25 meters tall and sitting
|
||||
# on its side.
|
||||
#
|
||||
# https://avalab.org/avastar/292/knowledge/compare-workbench/, while somewhat outdated,
|
||||
# has some information on rest pose vs default pose and scaling that I believe is relevant.
|
||||
# https://github.com/KhronosGroup/glTF-Blender-IO/issues/994 as well.
|
||||
#
|
||||
# While trying to figure out what was going on, I searched for something like
|
||||
# "inverse bind matrix scale collada", "bind pose scale blender", etc. Pretty much every
|
||||
# result was either a bug filed by, or a question asked by the creator of Avastar, or an SL user.
|
||||
# I think that says a lot about how annoying it is to author mesh for SL in particular.
|
||||
#
|
||||
# I spent a good month or so tearing my hair out over this wondering how these values could
|
||||
# even be possible. I wasn't sure how I should write mesh import code if I don't understand
|
||||
# how to interpret existing data, or how it even ended up the way it did. Turns out I wasn't
|
||||
# misinterpreting the data, the data really is just weird.
|
||||
#
|
||||
# I'd also had the idea that you could sniff which body a given rigged asset was meant
|
||||
# for by doing trivial matching on the inverse bind matrices, but obviously that isn't true!
|
||||
#
|
||||
# Basically:
|
||||
# 1) Maya is evil and generates evil, this evil bleeds into SL's assets through transforms.
|
||||
# 2) Blender is also evil, but in a manner that doesn't agree with Maya's evil.
|
||||
# 3) Collada was a valiant effort, but is evil in practice. Seemingly simple Collada
|
||||
# files are interpreted completely differently by Blender, Maya, and sometimes SL.
|
||||
# 4) Those three evils collude to make an interop nightmare for everyone like "oh my rigger
|
||||
# rigs using Maya and now my model is huge and all my normals are fucked on reimport"
|
||||
# 5) Yes, there's still good reasons to be using Avastar in 2022 even though nobody authoring
|
||||
# rigged mesh for any other use has to use something similar.
|
||||
|
||||
if not skin_seg['joint_names']:
|
||||
return
|
||||
|
||||
def fix_matrix_list_scale(source: List[List[float]], scale_fixup: Iterable[float]):
|
||||
for i, alt_inv_matrix in enumerate(source):
|
||||
source[i] = fix_llsd_matrix_scale(alt_inv_matrix, scale_fixup)
|
||||
|
||||
|
||||
def fix_llsd_matrix_scale(source: List[float], scale_fixup: Iterable[float]):
|
||||
matrix = np.array(source).reshape((4, 4))
|
||||
decomposed = list(transformations.decompose_matrix(matrix))
|
||||
# Need to handle both the scale and translation matrices
|
||||
for idx in (0, 3):
|
||||
decomposed[idx] = tuple(x * y for x, y in zip(decomposed[idx], scale_fixup))
|
||||
return list(transformations.compose_matrix(*decomposed).flatten('C'))
|
||||
# TODO: calculate the correct inverse bind matrix scale & rotations from avatar_skeleton.xml
|
||||
# definitions. If the rotation and scale factors are the same across all inverse bind matrices then
|
||||
# they can be moved over to the bind shape matrix to keep Blender happy.
|
||||
# Maybe add a scaled / rotated empty as a parent for the armature instead?
|
||||
return
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
@@ -29,6 +29,7 @@ import math
|
||||
from typing import *
|
||||
|
||||
import recordclass
|
||||
import transformations
|
||||
|
||||
logger = getLogger('hippolyzer.lib.base.datatypes')
|
||||
|
||||
@@ -38,6 +39,9 @@ class _IterableStub:
|
||||
__iter__: Callable
|
||||
|
||||
|
||||
RAD_TO_DEG = 180 / math.pi
|
||||
|
||||
|
||||
class TupleCoord(recordclass.datatuple, _IterableStub): # type: ignore
|
||||
__options__ = {
|
||||
"fast_new": False,
|
||||
@@ -220,6 +224,15 @@ class Quaternion(TupleCoord):
|
||||
)
|
||||
return super().__mul__(other)
|
||||
|
||||
@classmethod
|
||||
def from_transformations(cls, coord) -> Quaternion:
|
||||
"""Convert to W (S) last form"""
|
||||
return cls(coord[1], coord[2], coord[3], coord[0])
|
||||
|
||||
def to_transformations(self) -> Tuple[float, float, float, float]:
|
||||
"""Convert to W (S) first form for use with the transformations lib"""
|
||||
return self.W, self.X, self.Y, self.Z
|
||||
|
||||
@classmethod
|
||||
def from_euler(cls, roll, pitch, yaw, degrees=False):
|
||||
if degrees:
|
||||
@@ -241,6 +254,9 @@ class Quaternion(TupleCoord):
|
||||
|
||||
return cls(X=x, Y=y, Z=z, W=w)
|
||||
|
||||
def to_euler(self) -> Vector3:
|
||||
return Vector3(*transformations.euler_from_quaternion(self.to_transformations()))
|
||||
|
||||
def data(self, wanted_components=None):
|
||||
if wanted_components == 3:
|
||||
return self.X, self.Y, self.Z
|
||||
@@ -359,5 +375,5 @@ class TaggedUnion(recordclass.datatuple): # type: ignore
|
||||
__all__ = [
|
||||
"Vector3", "Vector4", "Vector2", "Quaternion", "TupleCoord",
|
||||
"UUID", "RawBytes", "StringEnum", "JankStringyBytes", "TaggedUnion",
|
||||
"IntEnum", "IntFlag", "flags_to_pod", "Pretty"
|
||||
"IntEnum", "IntFlag", "flags_to_pod", "Pretty", "RAD_TO_DEG"
|
||||
]
|
||||
|
||||
525
hippolyzer/lib/base/gltftools.py
Normal file
525
hippolyzer/lib/base/gltftools.py
Normal file
@@ -0,0 +1,525 @@
|
||||
"""
|
||||
WIP LLMesh -> glTF converter, for testing eventual glTF -> LLMesh conversion logic.
|
||||
"""
|
||||
# TODO:
|
||||
# * Simple tests
|
||||
# * Round-tripping skinning data from Blender-compatible glTF back to LLMesh (maybe through rig retargeting?)
|
||||
# * Panda3D-glTF viewer for LLMesh? The glTFs seem to work fine in Panda3D-glTF's `gltf-viewer`.
|
||||
# * Check if skew and projection components of transform matrices are ignored in practice as the spec requires.
|
||||
# I suppose this would render some real assets impossible to represent with glTF.
|
||||
|
||||
import dataclasses
|
||||
import math
|
||||
import pprint
|
||||
import sys
|
||||
import uuid
|
||||
from pathlib import Path
|
||||
from typing import *
|
||||
|
||||
import gltflib
|
||||
import numpy as np
|
||||
import transformations
|
||||
|
||||
from hippolyzer.lib.base.datatypes import Vector3
|
||||
from hippolyzer.lib.base.mesh import (
|
||||
LLMeshSerializer, MeshAsset, positions_from_domain, SkinSegmentDict, VertexWeight, llsd_to_mat4
|
||||
)
|
||||
from hippolyzer.lib.base.mesh_skeleton import AVATAR_SKELETON
|
||||
from hippolyzer.lib.base.serialization import BufferReader
|
||||
|
||||
|
||||
class IdentityList(list):
|
||||
"""
|
||||
List, but does index() by object identity, not equality
|
||||
|
||||
GLTF references objects by their index within some list, but we prefer to pass around
|
||||
actual object references internally. If we don't do this, then when we try and get
|
||||
a GLTF reference to a given object via `.index()` then we could end up actually getting
|
||||
a reference to some other object that just happens to be equal. This was causing issues
|
||||
with all primitives ending up with the same material, due to the default material's value
|
||||
being the same across all primitives.
|
||||
"""
|
||||
def index(self, value, start: Optional[int] = None, stop: Optional[int] = None) -> int:
|
||||
view = self[start:stop]
|
||||
for i, x in enumerate(view):
|
||||
if x is value:
|
||||
if start:
|
||||
return i + start
|
||||
return i
|
||||
raise ValueError(value)
|
||||
|
||||
|
||||
def sl_to_gltf_coords(coords):
|
||||
"""
|
||||
SL (X, Y, Z) -> GL (X, Z, Y), as GLTF commandeth
|
||||
|
||||
Note that this will only work when reordering axes, flipping an axis is more complicated.
|
||||
"""
|
||||
return coords[0], coords[2], coords[1], *coords[3:]
|
||||
|
||||
|
||||
def sl_to_gltf_uv(uv):
|
||||
"""Flip the V coordinate of a UV to match glTF convention"""
|
||||
return [uv[0], -uv[1]]
|
||||
|
||||
|
||||
def sl_mat4_to_gltf(mat: np.ndarray) -> List[float]:
|
||||
"""
|
||||
Convert an SL Mat4 to the glTF coordinate system
|
||||
|
||||
This should only be done immediately before storing the matrix in a glTF structure!
|
||||
"""
|
||||
# TODO: This is probably not correct. We definitely need to flip Z but there's
|
||||
# probably a better way to do it.
|
||||
decomp = [sl_to_gltf_coords(x) for x in transformations.decompose_matrix(mat)]
|
||||
trans = decomp[3]
|
||||
decomp[3] = (trans[0], trans[1], -trans[2])
|
||||
return list(transformations.compose_matrix(*decomp).flatten(order='F'))
|
||||
|
||||
|
||||
# Mat3 to convert points from SL coordinate space to GLTF coordinate space
|
||||
POINT_TO_GLTF_MAT = transformations.compose_matrix(angles=(-(math.pi / 2), 0, 0))[:3, :3]
|
||||
|
||||
|
||||
def sl_vec3_array_to_gltf(vec_list: np.ndarray) -> np.ndarray:
|
||||
new_array = []
|
||||
for x in vec_list:
|
||||
new_array.append(POINT_TO_GLTF_MAT.dot(x))
|
||||
return np.array(new_array)
|
||||
|
||||
|
||||
def sl_weights_to_gltf(sl_weights: List[List[VertexWeight]]) -> Tuple[np.ndarray, np.ndarray]:
|
||||
"""Convert SL Weights to separate JOINTS_0 and WEIGHTS_0 vec4 arrays"""
|
||||
joints = np.zeros((len(sl_weights), 4), dtype=np.uint8)
|
||||
weights = np.zeros((len(sl_weights), 4), dtype=np.float32)
|
||||
|
||||
for i, vert_weights in enumerate(sl_weights):
|
||||
# We need to re-normalize these since the quantization can mess them up
|
||||
collected_weights = []
|
||||
for j, vert_weight in enumerate(vert_weights):
|
||||
joints[i, j] = vert_weight.joint_idx
|
||||
collected_weights.append(vert_weight.weight)
|
||||
weight_sum = sum(collected_weights)
|
||||
if weight_sum:
|
||||
for j, weight in enumerate(collected_weights):
|
||||
weights[i, j] = weight / weight_sum
|
||||
|
||||
return joints, weights
|
||||
|
||||
|
||||
def normalize_vec3(a):
|
||||
norm = np.linalg.norm(a)
|
||||
if norm == 0:
|
||||
return a
|
||||
return a / norm
|
||||
|
||||
|
||||
def apply_bind_shape_matrix(bind_shape_matrix: np.ndarray, verts: np.ndarray, norms: np.ndarray) \
|
||||
-> Tuple[np.ndarray, np.ndarray]:
|
||||
"""
|
||||
Apply the bind shape matrix to the mesh data
|
||||
|
||||
glTF expects all verts and normals to be in armature-local space so that mesh data can be shared
|
||||
between differently-oriented armatures. Or something.
|
||||
# https://github.com/KhronosGroup/glTF-Blender-IO/issues/566#issuecomment-523119339
|
||||
|
||||
glTF also doesn't have a concept of a "bind shape matrix" like Collada does
|
||||
per its skinning docs, so we have to mix it into the mesh data manually.
|
||||
See https://github.com/KhronosGroup/glTF-Tutorials/blob/master/gltfTutorial/gltfTutorial_020_Skins.md
|
||||
"""
|
||||
scale, _, angles, translation, _ = transformations.decompose_matrix(bind_shape_matrix)
|
||||
scale_mat = transformations.compose_matrix(scale=scale)[:3, :3]
|
||||
rot_mat = transformations.euler_matrix(*angles)[:3, :3]
|
||||
rot_scale_mat = scale_mat @ np.linalg.inv(rot_mat)
|
||||
|
||||
# Apply the SRT transform to each vert
|
||||
verts = (verts @ rot_scale_mat) + translation
|
||||
|
||||
# Our scale is unlikely to be uniform, so we have to fix up our normals as well.
|
||||
# https://paroj.github.io/gltut/Illumination/Tut09%20Normal%20Transformation.html
|
||||
inv_transpose_mat = np.transpose(np.linalg.inv(bind_shape_matrix)[:3, :3])
|
||||
new_norms = [normalize_vec3(inv_transpose_mat @ norm) for norm in norms]
|
||||
|
||||
return verts, np.array(new_norms)
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class JointContext:
|
||||
node: gltflib.Node
|
||||
# Original matrix for the bone, may have custom translation, but otherwise the same.
|
||||
orig_matrix: np.ndarray
|
||||
# xform that must be applied to inverse bind matrices to account for the changed bone
|
||||
fixup_matrix: np.ndarray
|
||||
|
||||
|
||||
JOINT_CONTEXT_DICT = Dict[str, JointContext]
|
||||
|
||||
|
||||
class GLTFBuilder:
|
||||
def __init__(self, blender_compatibility=False):
|
||||
self.scene = gltflib.Scene(nodes=IdentityList())
|
||||
self.model = gltflib.GLTFModel(
|
||||
asset=gltflib.Asset(version="2.0"),
|
||||
accessors=IdentityList(),
|
||||
nodes=IdentityList(),
|
||||
materials=IdentityList(),
|
||||
buffers=IdentityList(),
|
||||
bufferViews=IdentityList(),
|
||||
meshes=IdentityList(),
|
||||
skins=IdentityList(),
|
||||
scenes=IdentityList((self.scene,)),
|
||||
extensionsUsed=["KHR_materials_specular"],
|
||||
scene=0,
|
||||
)
|
||||
self.gltf = gltflib.GLTF(
|
||||
model=self.model,
|
||||
resources=IdentityList(),
|
||||
)
|
||||
self.blender_compatibility = blender_compatibility
|
||||
|
||||
def add_nodes_from_llmesh(self, mesh: MeshAsset, name: str, mesh_transform: Optional[np.ndarray] = None):
|
||||
"""Build a glTF version of a mesh asset, appending it and its armature to the scene root"""
|
||||
# TODO: mesh data instancing?
|
||||
# consider https://github.com/KhronosGroup/glTF-Blender-IO/issues/1634.
|
||||
if mesh_transform is None:
|
||||
mesh_transform = np.identity(4)
|
||||
|
||||
skin_seg: Optional[SkinSegmentDict] = mesh.segments.get('skin')
|
||||
skin = None
|
||||
if skin_seg:
|
||||
mesh_transform = llsd_to_mat4(skin_seg['bind_shape_matrix'])
|
||||
joint_ctxs = self.add_joints(skin_seg)
|
||||
|
||||
# Give our armature a root node and parent the pelvis to it
|
||||
armature_node = self.add_node("Armature")
|
||||
self.scene.nodes.append(self.model.nodes.index(armature_node))
|
||||
armature_node.children.append(self.model.nodes.index(joint_ctxs['mPelvis'].node))
|
||||
skin = self.add_skin("Armature", joint_ctxs, skin_seg)
|
||||
skin.skeleton = self.model.nodes.index(armature_node)
|
||||
|
||||
primitives = []
|
||||
# Just the high LOD for now
|
||||
for submesh in mesh.segments['high_lod']:
|
||||
verts = np.array(positions_from_domain(submesh['Position'], submesh['PositionDomain']))
|
||||
norms = np.array(submesh['Normal'])
|
||||
tris = np.array(submesh['TriangleList'])
|
||||
joints = np.array([])
|
||||
weights = np.array([])
|
||||
range_uv = np.array([])
|
||||
if "TexCoord0" in submesh:
|
||||
range_uv = np.array(positions_from_domain(submesh['TexCoord0'], submesh['TexCoord0Domain']))
|
||||
if 'Weights' in submesh:
|
||||
joints, weights = sl_weights_to_gltf(submesh['Weights'])
|
||||
|
||||
if skin:
|
||||
# Convert verts and norms to armature-local space
|
||||
verts, norms = apply_bind_shape_matrix(mesh_transform, verts, norms)
|
||||
|
||||
primitives.append(self.add_primitive(
|
||||
tris=tris,
|
||||
positions=verts,
|
||||
normals=norms,
|
||||
uvs=range_uv,
|
||||
joints=joints,
|
||||
weights=weights,
|
||||
))
|
||||
|
||||
mesh_node = self.add_node(
|
||||
name,
|
||||
self.add_mesh(name, primitives),
|
||||
transform=mesh_transform,
|
||||
)
|
||||
if skin:
|
||||
# Node translation isn't relevant, we're going to use the bind matrices
|
||||
# If you pull this into Blender you may want to untick "Guess Original Bind Pose",
|
||||
# it guesses that based on the inverse bind matrices which may have Maya poisoning.
|
||||
# TODO: Maybe we could automatically undo that by comparing expected bone scale and rot
|
||||
# to scale and rot in the inverse bind matrices, and applying fixups to the
|
||||
# bind shape matrix and inverse bind matrices?
|
||||
mesh_node.matrix = None
|
||||
mesh_node.skin = self.model.skins.index(skin)
|
||||
|
||||
self.scene.nodes.append(self.model.nodes.index(mesh_node))
|
||||
|
||||
def add_node(
|
||||
self,
|
||||
name: str,
|
||||
mesh: Optional[gltflib.Mesh] = None,
|
||||
transform: Optional[np.ndarray] = None,
|
||||
) -> gltflib.Node:
|
||||
node = gltflib.Node(
|
||||
name=name,
|
||||
mesh=self.model.meshes.index(mesh) if mesh else None,
|
||||
matrix=sl_mat4_to_gltf(transform) if transform is not None else None,
|
||||
children=[],
|
||||
)
|
||||
self.model.nodes.append(node)
|
||||
return node
|
||||
|
||||
def add_mesh(
|
||||
self,
|
||||
name: str,
|
||||
primitives: List[gltflib.Primitive],
|
||||
) -> gltflib.Mesh:
|
||||
for i, prim in enumerate(primitives):
|
||||
# Give the materials a name relating to what "face" they belong to
|
||||
self.model.materials[prim.material].name = f"{name}.{i:03}"
|
||||
mesh = gltflib.Mesh(name=name, primitives=primitives)
|
||||
self.model.meshes.append(mesh)
|
||||
return mesh
|
||||
|
||||
def add_primitive(
|
||||
self,
|
||||
tris: np.ndarray,
|
||||
positions: np.ndarray,
|
||||
normals: np.ndarray,
|
||||
uvs: np.ndarray,
|
||||
weights: np.ndarray,
|
||||
joints: np.ndarray,
|
||||
) -> gltflib.Primitive:
|
||||
# Make a Material for the primitive. Materials pretty much _are_ the primitives in
|
||||
# LLMesh, so just make them both in one go. We need a unique material for each primitive.
|
||||
material = gltflib.Material(
|
||||
pbrMetallicRoughness=gltflib.PBRMetallicRoughness(
|
||||
baseColorFactor=[1.0, 1.0, 1.0, 1.0],
|
||||
metallicFactor=0.0,
|
||||
roughnessFactor=0.0,
|
||||
),
|
||||
extensions={
|
||||
"KHR_materials_specular": {
|
||||
"specularFactor": 0.0,
|
||||
"specularColorFactor": [0, 0, 0]
|
||||
},
|
||||
}
|
||||
)
|
||||
self.model.materials.append(material)
|
||||
|
||||
attributes = gltflib.Attributes(
|
||||
POSITION=self.maybe_add_vec_array(sl_vec3_array_to_gltf(positions), gltflib.AccessorType.VEC3),
|
||||
NORMAL=self.maybe_add_vec_array(sl_vec3_array_to_gltf(normals), gltflib.AccessorType.VEC3),
|
||||
TEXCOORD_0=self.maybe_add_vec_array(np.array([sl_to_gltf_uv(uv) for uv in uvs]), gltflib.AccessorType.VEC2),
|
||||
JOINTS_0=self.maybe_add_vec_array(joints, gltflib.AccessorType.VEC4, gltflib.ComponentType.UNSIGNED_BYTE),
|
||||
WEIGHTS_0=self.maybe_add_vec_array(weights, gltflib.AccessorType.VEC4),
|
||||
)
|
||||
|
||||
return gltflib.Primitive(
|
||||
attributes=attributes,
|
||||
indices=self.model.accessors.index(self.add_scalars(tris)),
|
||||
material=self.model.materials.index(material),
|
||||
mode=gltflib.PrimitiveMode.TRIANGLES,
|
||||
)
|
||||
|
||||
def add_scalars(self, scalars: np.ndarray) -> gltflib.Accessor:
|
||||
"""
|
||||
Add a potentially multidimensional array of scalars, returning the accessor
|
||||
|
||||
Generally only used for triangle indices
|
||||
"""
|
||||
scalar_bytes = scalars.astype(np.uint32).flatten().tobytes()
|
||||
buffer_view = self.add_buffer_view(scalar_bytes, None)
|
||||
accessor = gltflib.Accessor(
|
||||
bufferView=self.model.bufferViews.index(buffer_view),
|
||||
componentType=gltflib.ComponentType.UNSIGNED_INT,
|
||||
count=scalars.size, # use the flattened size!
|
||||
type=gltflib.AccessorType.SCALAR.value, # type: ignore
|
||||
min=[int(scalars.min())], # type: ignore
|
||||
max=[int(scalars.max())], # type: ignore
|
||||
)
|
||||
self.model.accessors.append(accessor)
|
||||
return accessor
|
||||
|
||||
def maybe_add_vec_array(
|
||||
self,
|
||||
vecs: np.ndarray,
|
||||
vec_type: gltflib.AccessorType,
|
||||
component_type: gltflib.ComponentType = gltflib.ComponentType.FLOAT,
|
||||
) -> Optional[int]:
|
||||
if not vecs.size:
|
||||
return None
|
||||
accessor = self.add_vec_array(vecs, vec_type, component_type)
|
||||
return self.model.accessors.index(accessor)
|
||||
|
||||
def add_vec_array(
|
||||
self,
|
||||
vecs: np.ndarray,
|
||||
vec_type: gltflib.AccessorType,
|
||||
component_type: gltflib.ComponentType = gltflib.ComponentType.FLOAT
|
||||
) -> gltflib.Accessor:
|
||||
"""
|
||||
Add a two-dimensional array of vecs (positions, normals, weights, UVs) returning the accessor
|
||||
|
||||
Vec type may be a vec2, vec3, or a vec4.
|
||||
"""
|
||||
# Pretty much all of these are float32 except the ones that aren't
|
||||
dtype = np.float32
|
||||
if component_type == gltflib.ComponentType.UNSIGNED_BYTE:
|
||||
dtype = np.uint8
|
||||
vec_data = vecs.astype(dtype).tobytes()
|
||||
buffer_view = self.add_buffer_view(vec_data, target=None)
|
||||
accessor = gltflib.Accessor(
|
||||
bufferView=self.model.bufferViews.index(buffer_view),
|
||||
componentType=component_type,
|
||||
count=len(vecs),
|
||||
type=vec_type.value, # type: ignore
|
||||
min=vecs.min(axis=0).tolist(), # type: ignore
|
||||
max=vecs.max(axis=0).tolist(), # type: ignore
|
||||
)
|
||||
self.model.accessors.append(accessor)
|
||||
return accessor
|
||||
|
||||
def add_buffer_view(self, data: bytes, target: Optional[gltflib.BufferTarget]) -> gltflib.BufferView:
|
||||
"""Create a buffer view and associated buffer and resource for a blob of data"""
|
||||
resource = gltflib.FileResource(filename=f"res-{uuid.uuid4()}.bin", data=data)
|
||||
self.gltf.resources.append(resource)
|
||||
|
||||
buffer = gltflib.Buffer(uri=resource.filename, byteLength=len(resource.data))
|
||||
self.model.buffers.append(buffer)
|
||||
|
||||
buffer_view = gltflib.BufferView(
|
||||
buffer=self.model.buffers.index(buffer),
|
||||
byteLength=buffer.byteLength,
|
||||
byteOffset=0,
|
||||
target=target
|
||||
)
|
||||
self.model.bufferViews.append(buffer_view)
|
||||
return buffer_view
|
||||
|
||||
def add_joints(self, skin: SkinSegmentDict) -> JOINT_CONTEXT_DICT:
|
||||
joints: JOINT_CONTEXT_DICT = {}
|
||||
# There may be some joints not present in the mesh that we need to add to reach the mPelvis root
|
||||
required_joints = AVATAR_SKELETON.get_required_joints(skin['joint_names'])
|
||||
|
||||
# If this is present, it may override the joint positions from the skeleton definition
|
||||
if 'alt_inverse_bind_matrix' in skin:
|
||||
joint_overrides = dict(zip(skin['joint_names'], skin['alt_inverse_bind_matrix']))
|
||||
else:
|
||||
joint_overrides = {}
|
||||
|
||||
for joint_name in required_joints:
|
||||
joint = AVATAR_SKELETON[joint_name]
|
||||
joint_matrix = joint.matrix
|
||||
|
||||
# Do we have a joint position override that would affect joint_matrix?
|
||||
override = joint_overrides.get(joint_name)
|
||||
if override:
|
||||
decomp = list(transformations.decompose_matrix(joint_matrix))
|
||||
# We specifically only want the translation from the override!
|
||||
translation = transformations.translation_from_matrix(llsd_to_mat4(override))
|
||||
# Only do it if the difference is over 0.1mm though
|
||||
if Vector3.dist(Vector3(*translation), joint.translation) > 0.0001:
|
||||
decomp[3] = translation
|
||||
joint_matrix = transformations.compose_matrix(*decomp)
|
||||
|
||||
# Do we need to mess with the bone's matrices to make Blender cooperate?
|
||||
orig_matrix = joint_matrix
|
||||
fixup_matrix = np.identity(4)
|
||||
if self.blender_compatibility:
|
||||
joint_matrix, fixup_matrix = self._fix_blender_joint(joint_matrix)
|
||||
|
||||
# TODO: populate "extras" here with the metadata the Blender collada stuff uses to store
|
||||
# "bind_mat" and "rest_mat" so we can go back to our original matrices when exporting
|
||||
# from blender to .dae!
|
||||
node = self.add_node(joint_name, transform=joint_matrix)
|
||||
|
||||
# Store the node along with any fixups we may need to apply to the bind matrices later
|
||||
joints[joint_name] = JointContext(node, orig_matrix, fixup_matrix)
|
||||
|
||||
# Add each joint to the child list of their respective parent
|
||||
for joint_name, joint_ctx in joints.items():
|
||||
if parent := AVATAR_SKELETON[joint_name].parent:
|
||||
joints[parent().name].node.children.append(self.model.nodes.index(joint_ctx.node))
|
||||
return joints
|
||||
|
||||
def _fix_blender_joint(self, joint_matrix: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
|
||||
"""
|
||||
Split a joint matrix into a joint matrix and fixup matrix
|
||||
|
||||
If we don't account for weird scaling on the collision volumes, then
|
||||
Blender freaks out. This is an issue in blender where it doesn't
|
||||
apply the inverse bind matrices relative to the scale and rotation of
|
||||
the bones themselves, as it should per the glTF spec. Blender's glTF loader
|
||||
tries to recover from this by applying certain transforms as a pose, but
|
||||
the damage has been done by that point. Nobody else runs really runs into
|
||||
this because they have the good sense to not use some nightmare abomination
|
||||
rig with scaling and rotation on the skeleton like SL does.
|
||||
|
||||
Blender will _only_ correctly handle the translation component of the joint,
|
||||
any other transforms need to be mixed into the inverse bind matrices themselves.
|
||||
There's no internal concept of bone scale or rot in Blender right now.
|
||||
|
||||
Should investigate an Avastar-style approach of optionally retargeting
|
||||
to a Blender-compatible rig with translation-only bones, and modify
|
||||
the bind matrices to accommodate. The glTF importer supports metadata through
|
||||
the "extras" fields, so we can potentially abuse the "bind_mat" metadata field
|
||||
that Blender already uses for the "Keep Bind Info" Collada import / export hack.
|
||||
|
||||
For context:
|
||||
* https://github.com/KhronosGroup/glTF-Blender-IO/issues/1305
|
||||
* https://developer.blender.org/T38660 (these are Collada, but still relevant)
|
||||
* https://developer.blender.org/T29246
|
||||
* https://developer.blender.org/T50412
|
||||
* https://developer.blender.org/T53620 (FBX but still relevant)
|
||||
"""
|
||||
scale, shear, angles, translate, projection = transformations.decompose_matrix(joint_matrix)
|
||||
joint_matrix = transformations.compose_matrix(translate=translate)
|
||||
fixup_matrix = transformations.compose_matrix(scale=scale, angles=angles)
|
||||
return joint_matrix, fixup_matrix
|
||||
|
||||
def add_skin(self, name: str, joint_nodes: JOINT_CONTEXT_DICT, skin_seg: SkinSegmentDict) -> gltflib.Skin:
|
||||
joints_arr = []
|
||||
for joint_name in skin_seg['joint_names']:
|
||||
joint_ctx = joint_nodes[joint_name]
|
||||
joints_arr.append(self.model.nodes.index(joint_ctx.node))
|
||||
|
||||
inv_binds = []
|
||||
for joint_name, inv_bind in zip(skin_seg['joint_names'], skin_seg['inverse_bind_matrix']):
|
||||
joint_ctx = joint_nodes[joint_name]
|
||||
inv_bind = joint_ctx.fixup_matrix @ llsd_to_mat4(inv_bind)
|
||||
inv_binds.append(sl_mat4_to_gltf(inv_bind))
|
||||
inv_binds_data = np.array(inv_binds, dtype=np.float32).tobytes()
|
||||
buffer_view = self.add_buffer_view(inv_binds_data, target=None)
|
||||
accessor = gltflib.Accessor(
|
||||
bufferView=self.model.bufferViews.index(buffer_view),
|
||||
componentType=gltflib.ComponentType.FLOAT,
|
||||
count=len(inv_binds),
|
||||
type=gltflib.AccessorType.MAT4.value, # type: ignore
|
||||
)
|
||||
self.model.accessors.append(accessor)
|
||||
accessor_idx = self.model.accessors.index(accessor)
|
||||
|
||||
skin = gltflib.Skin(name=name, joints=joints_arr, inverseBindMatrices=accessor_idx)
|
||||
self.model.skins.append(skin)
|
||||
return skin
|
||||
|
||||
def finalize(self):
|
||||
"""Clean up the mesh to pass the glTF smell test, should be done last"""
|
||||
def _nullify_empty_lists(dc):
|
||||
for field in dataclasses.fields(dc):
|
||||
# Empty lists should be replaced with None
|
||||
if getattr(dc, field.name) == []:
|
||||
setattr(dc, field.name, None)
|
||||
|
||||
for node in self.model.nodes:
|
||||
_nullify_empty_lists(node)
|
||||
_nullify_empty_lists(self.model)
|
||||
return self.gltf
|
||||
|
||||
|
||||
def main():
|
||||
# Take an llmesh file as an argument and spit out basename-converted.gltf
|
||||
with open(sys.argv[1], "rb") as f:
|
||||
reader = BufferReader("<", f.read())
|
||||
|
||||
filename = Path(sys.argv[1]).stem
|
||||
mesh: MeshAsset = reader.read(LLMeshSerializer(parse_segment_contents=True))
|
||||
|
||||
builder = GLTFBuilder(blender_compatibility=True)
|
||||
builder.add_nodes_from_llmesh(mesh, filename)
|
||||
gltf = builder.finalize()
|
||||
|
||||
pprint.pprint(gltf.model)
|
||||
gltf.export_glb(sys.argv[1].rsplit(".", 1)[0] + "-converted.gltf")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -117,6 +117,8 @@ class InventoryBase(SchemaBase):
|
||||
# Not meant to be serialized
|
||||
if not spec:
|
||||
continue
|
||||
if field.metadata.get("llsd_only"):
|
||||
continue
|
||||
|
||||
val = getattr(self, field_name)
|
||||
if val is None:
|
||||
@@ -166,16 +168,11 @@ class InventoryModel(InventoryBase):
|
||||
def from_llsd(cls, llsd_val: List[Dict]) -> InventoryModel:
|
||||
model = cls()
|
||||
for obj_dict in llsd_val:
|
||||
if InventoryCategory.ID_ATTR in obj_dict:
|
||||
if (obj := InventoryCategory.from_llsd(obj_dict)) is not None:
|
||||
model.add(obj)
|
||||
elif InventoryObject.ID_ATTR in obj_dict:
|
||||
if (obj := InventoryObject.from_llsd(obj_dict)) is not None:
|
||||
model.add(obj)
|
||||
elif InventoryItem.ID_ATTR in obj_dict:
|
||||
if (obj := InventoryItem.from_llsd(obj_dict)) is not None:
|
||||
model.add(obj)
|
||||
else:
|
||||
for inv_type in INVENTORY_TYPES:
|
||||
if inv_type.ID_ATTR in obj_dict:
|
||||
if (obj := inv_type.from_llsd(obj_dict)) is not None:
|
||||
model.add(obj)
|
||||
break
|
||||
LOG.warning(f"Unknown object type {obj_dict!r}")
|
||||
return model
|
||||
|
||||
@@ -218,13 +215,13 @@ class InventoryModel(InventoryBase):
|
||||
self.root = node
|
||||
node.model = weakref.proxy(self)
|
||||
|
||||
def unlink(self, node: InventoryNodeBase) -> Sequence[InventoryNodeBase]:
|
||||
def unlink(self, node: InventoryNodeBase, single_only: bool = False) -> Sequence[InventoryNodeBase]:
|
||||
"""Unlink a node and its descendants from the tree, returning the removed nodes"""
|
||||
assert node.model == self
|
||||
if node == self.root:
|
||||
self.root = None
|
||||
unlinked = [node]
|
||||
if isinstance(node, InventoryContainerBase):
|
||||
if isinstance(node, InventoryContainerBase) and not single_only:
|
||||
for child in node.children:
|
||||
unlinked.extend(self.unlink(child))
|
||||
self.nodes.pop(node.node_id, None)
|
||||
@@ -257,6 +254,15 @@ class InventoryModel(InventoryBase):
|
||||
removed=removed_in_other,
|
||||
)
|
||||
|
||||
def __getitem__(self, item: UUID) -> InventoryNodeBase:
|
||||
return self.nodes[item]
|
||||
|
||||
def __contains__(self, item: UUID):
|
||||
return item in self.nodes
|
||||
|
||||
def get(self, item: UUID) -> Optional[InventoryNodeBase]:
|
||||
return self.nodes.get(item)
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class InventoryPermissions(InventoryBase):
|
||||
@@ -271,6 +277,9 @@ class InventoryPermissions(InventoryBase):
|
||||
owner_id: UUID = schema_field(SchemaUUID)
|
||||
last_owner_id: UUID = schema_field(SchemaUUID)
|
||||
group_id: UUID = schema_field(SchemaUUID)
|
||||
# Nothing actually cares about this, but it could be there.
|
||||
# It's kind of redundant since it just means owner_id == NULL_KEY && group_id != NULL_KEY.
|
||||
is_owner_group: int = schema_field(SchemaInt, default=0, llsd_only=True)
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
@@ -384,6 +393,7 @@ class InventoryObject(InventoryContainerBase):
|
||||
class InventoryCategory(InventoryContainerBase):
|
||||
ID_ATTR: ClassVar[str] = "cat_id"
|
||||
SCHEMA_NAME: ClassVar[str] = "inv_category"
|
||||
VERSION_NONE: ClassVar[int] = -1
|
||||
|
||||
cat_id: UUID = schema_field(SchemaUUID)
|
||||
pref_type: str = schema_field(SchemaStr, llsd_name="preferred_type")
|
||||
@@ -417,3 +427,6 @@ class InventoryItem(InventoryNodeBase):
|
||||
if self.asset_id is not None:
|
||||
return self.asset_id
|
||||
return self.shadow_id ^ MAGIC_ID
|
||||
|
||||
|
||||
INVENTORY_TYPES: Tuple[Type[InventoryNodeBase], ...] = (InventoryCategory, InventoryObject, InventoryItem)
|
||||
|
||||
@@ -111,10 +111,10 @@ class SchemaUUID(SchemaFieldSerializer[UUID]):
|
||||
|
||||
|
||||
def schema_field(spec: Type[Union[SchemaBase, SchemaFieldSerializer]], *, default=dataclasses.MISSING, init=True,
|
||||
repr=True, hash=None, compare=True, llsd_name=None) -> dataclasses.Field: # noqa
|
||||
repr=True, hash=None, compare=True, llsd_name=None, llsd_only=False) -> dataclasses.Field: # noqa
|
||||
"""Describe a field in the inventory schema and the shape of its value"""
|
||||
return dataclasses.field(
|
||||
metadata={"spec": spec, "llsd_name": llsd_name}, default=default,
|
||||
return dataclasses.field( # noqa
|
||||
metadata={"spec": spec, "llsd_name": llsd_name, "llsd_only": llsd_only}, default=default,
|
||||
init=init, repr=repr, hash=hash, compare=compare,
|
||||
)
|
||||
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import datetime
|
||||
import typing
|
||||
import zlib
|
||||
|
||||
@@ -47,6 +46,12 @@ class HippoLLSDNotationFormatter(llbase.llsd.LLSDNotationFormatter, HippoLLSDBas
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
def STRING(self, v):
|
||||
# llbase's notation LLSD encoder isn't suitable for generating line-delimited
|
||||
# LLSD because the string formatter leaves \n unencoded, unlike indra's llcommon.
|
||||
# Add our own escaping rule.
|
||||
return super().STRING(v).replace(b"\n", b"\\n")
|
||||
|
||||
|
||||
def format_notation(val: typing.Any):
|
||||
return HippoLLSDNotationFormatter().format(val)
|
||||
@@ -63,10 +68,9 @@ def format_binary(val: typing.Any, with_header=True):
|
||||
# With a few minor changes to make serialization round-trip correctly. It's evil.
|
||||
def _format_binary_recurse(something) -> bytes:
|
||||
"""Binary formatter workhorse."""
|
||||
def _format_list(something):
|
||||
array_builder = []
|
||||
array_builder.append(b'[' + struct.pack('!i', len(something)))
|
||||
for item in something:
|
||||
def _format_list(list_something):
|
||||
array_builder = [b'[' + struct.pack('!i', len(list_something))]
|
||||
for item in list_something:
|
||||
array_builder.append(_format_binary_recurse(item))
|
||||
array_builder.append(b']')
|
||||
return b''.join(array_builder)
|
||||
@@ -108,8 +112,7 @@ def _format_binary_recurse(something) -> bytes:
|
||||
elif isinstance(something, (list, tuple)):
|
||||
return _format_list(something)
|
||||
elif isinstance(something, dict):
|
||||
map_builder = []
|
||||
map_builder.append(b'{' + struct.pack('!i', len(something)))
|
||||
map_builder = [b'{' + struct.pack('!i', len(something))]
|
||||
for key, value in something.items():
|
||||
if isinstance(key, str):
|
||||
key = key.encode("utf8")
|
||||
|
||||
@@ -11,15 +11,25 @@ from typing import *
|
||||
import zlib
|
||||
from copy import deepcopy
|
||||
|
||||
import numpy as np
|
||||
import recordclass
|
||||
|
||||
from hippolyzer.lib.base import serialization as se
|
||||
from hippolyzer.lib.base.datatypes import Vector3, Vector2, UUID, TupleCoord
|
||||
from hippolyzer.lib.base.llsd import zip_llsd, unzip_llsd
|
||||
from hippolyzer.lib.base.serialization import ParseContext
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def llsd_to_mat4(mat: Union[np.ndarray, Sequence[float]]) -> np.ndarray:
|
||||
return np.array(mat).reshape((4, 4), order='F')
|
||||
|
||||
|
||||
def mat4_to_llsd(mat: np.ndarray) -> List[float]:
|
||||
return list(mat.flatten(order='F'))
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class MeshAsset:
|
||||
header: MeshHeaderDict = dataclasses.field(default_factory=dict)
|
||||
@@ -255,7 +265,6 @@ def positions_to_domain(positions: Iterable[TupleCoord], domain: DomainDict):
|
||||
|
||||
class VertexWeights(se.SerializableBase):
|
||||
"""Serializer for a list of joint weights on a single vertex"""
|
||||
INFLUENCE_SER = se.QuantizedFloat(se.U16, 0.0, 1.0)
|
||||
INFLUENCE_LIMIT = 4
|
||||
INFLUENCE_TERM = 0xFF
|
||||
|
||||
@@ -266,18 +275,30 @@ class VertexWeights(se.SerializableBase):
|
||||
for val in vals:
|
||||
joint_idx, influence = val
|
||||
writer.write(se.U8, joint_idx)
|
||||
writer.write(cls.INFLUENCE_SER, influence, ctx=ctx)
|
||||
writer.write(se.U16, round(influence * 0xFFff), ctx=ctx)
|
||||
if len(vals) != cls.INFLUENCE_LIMIT:
|
||||
writer.write(se.U8, cls.INFLUENCE_TERM)
|
||||
|
||||
@classmethod
|
||||
def deserialize(cls, reader: se.Reader, ctx=None):
|
||||
# NOTE: normally you'd want to do something like arrange this into a nicely
|
||||
# aligned byte array with zero padding so that you could vectorize the decoding.
|
||||
# In cases where having a vertex with no weights is semantically equivalent to
|
||||
# having a vertex _with_ weights of a value of 0.0 that's fine. This isn't the case
|
||||
# in LL's implementation of mesh:
|
||||
#
|
||||
# https://bitbucket.org/lindenlab/viewer/src/d31a83fb946c49a38376ea3b312b5380d0c8c065/indra/llmath/llvolume.cpp#lines-2560:2628
|
||||
#
|
||||
# Consider the difference between handling of b"\x00\x00\x00\xFF" and b"\xFF" with the above logic.
|
||||
# To simplify round-tripping while preserving those semantics, we don't do a vectorized decode.
|
||||
# I had a vectorized numpy version, but those requirements made everything a bit of a mess.
|
||||
influence_list = []
|
||||
for _ in range(cls.INFLUENCE_LIMIT):
|
||||
joint_idx = reader.read(se.U8)
|
||||
joint_idx = reader.read_bytes(1)[0]
|
||||
if joint_idx == cls.INFLUENCE_TERM:
|
||||
break
|
||||
influence_list.append(VertexWeight(joint_idx, reader.read(cls.INFLUENCE_SER, ctx=ctx)))
|
||||
weight = reader.read(se.U16, ctx=ctx) / 0xFFff
|
||||
influence_list.append(VertexWeight(joint_idx, weight))
|
||||
return influence_list
|
||||
|
||||
|
||||
@@ -312,16 +333,46 @@ class SegmentSerializer:
|
||||
return new_segment
|
||||
|
||||
|
||||
class VecListAdapter(se.Adapter):
|
||||
def __init__(self, child_spec: se.SERIALIZABLE_TYPE, vec_type: Type):
|
||||
super().__init__(child_spec)
|
||||
self.vec_type = vec_type
|
||||
|
||||
def encode(self, val: Any, ctx: Optional[ParseContext]) -> Any:
|
||||
return val
|
||||
|
||||
def decode(self, val: Any, ctx: Optional[ParseContext], pod: bool = False) -> Any:
|
||||
new_vals = []
|
||||
for elem in val:
|
||||
new_vals.append(self.vec_type(*elem))
|
||||
return new_vals
|
||||
|
||||
|
||||
LE_U16: np.dtype = np.dtype(np.uint16).newbyteorder('<') # noqa
|
||||
|
||||
|
||||
LOD_SEGMENT_SERIALIZER = SegmentSerializer({
|
||||
# 16-bit indices to the verts making up the tri. Imposes a 16-bit
|
||||
# upper limit on verts in any given material in the mesh.
|
||||
"TriangleList": se.Collection(None, se.Collection(3, se.U16)),
|
||||
"TriangleList": se.ExprAdapter(
|
||||
se.NumPyArray(se.BytesGreedy(), LE_U16, 3),
|
||||
decode_func=lambda x: x.tolist(),
|
||||
),
|
||||
# These are used to interpolate between values in their respective domains
|
||||
# Each position represents a single vert.
|
||||
"Position": se.Collection(None, se.Vector3U16(0.0, 1.0)),
|
||||
"TexCoord0": se.Collection(None, se.Vector2U16(0.0, 1.0)),
|
||||
# Normals have a static domain between -1 and 1, so just use that.
|
||||
"Normal": se.Collection(None, se.Vector3U16(-1.0, 1.0)),
|
||||
"Position": VecListAdapter(
|
||||
se.QuantizedNumPyArray(se.NumPyArray(se.BytesGreedy(), LE_U16, 3), 0.0, 1.0),
|
||||
Vector3,
|
||||
),
|
||||
"TexCoord0": VecListAdapter(
|
||||
se.QuantizedNumPyArray(se.NumPyArray(se.BytesGreedy(), LE_U16, 2), 0.0, 1.0),
|
||||
Vector2,
|
||||
),
|
||||
# Normals have a static domain between -1 and 1, so we just use that rather than 0.0 - 1.0.
|
||||
"Normal": VecListAdapter(
|
||||
se.QuantizedNumPyArray(se.NumPyArray(se.BytesGreedy(), LE_U16, 3), -1.0, 1.0),
|
||||
Vector3,
|
||||
),
|
||||
"Weights": se.Collection(None, VertexWeights)
|
||||
})
|
||||
|
||||
|
||||
90
hippolyzer/lib/base/mesh_skeleton.py
Normal file
90
hippolyzer/lib/base/mesh_skeleton.py
Normal file
@@ -0,0 +1,90 @@
|
||||
import dataclasses
|
||||
import weakref
|
||||
from typing import *
|
||||
|
||||
import transformations
|
||||
from lxml import etree
|
||||
|
||||
from hippolyzer.lib.base.datatypes import Vector3, RAD_TO_DEG
|
||||
from hippolyzer.lib.base.helpers import get_resource_filename
|
||||
|
||||
|
||||
MAYBE_JOINT_REF = Optional[Callable[[], "JointNode"]]
|
||||
|
||||
|
||||
@dataclasses.dataclass(unsafe_hash=True)
|
||||
class JointNode:
|
||||
name: str
|
||||
parent: MAYBE_JOINT_REF
|
||||
translation: Vector3
|
||||
pivot: Vector3 # pivot point for the joint, generally the same as translation
|
||||
rotation: Vector3 # Euler rotation in degrees
|
||||
scale: Vector3
|
||||
type: str # bone or collision_volume
|
||||
|
||||
@property
|
||||
def matrix(self):
|
||||
return transformations.compose_matrix(
|
||||
scale=tuple(self.scale),
|
||||
angles=tuple(self.rotation / RAD_TO_DEG),
|
||||
translate=tuple(self.translation),
|
||||
)
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class Skeleton:
|
||||
joint_dict: Dict[str, JointNode]
|
||||
|
||||
def __getitem__(self, item: str) -> JointNode:
|
||||
return self.joint_dict[item]
|
||||
|
||||
@classmethod
|
||||
def _parse_node_children(cls, joint_dict: Dict[str, JointNode], node: etree.ElementBase, parent: MAYBE_JOINT_REF):
|
||||
name = node.get('name')
|
||||
joint = JointNode(
|
||||
name=name,
|
||||
parent=parent,
|
||||
translation=_get_vec_attr(node, "pos", Vector3()),
|
||||
pivot=_get_vec_attr(node, "pivot", Vector3()),
|
||||
rotation=_get_vec_attr(node, "rot", Vector3()),
|
||||
scale=_get_vec_attr(node, "scale", Vector3(1, 1, 1)),
|
||||
type=node.tag,
|
||||
)
|
||||
joint_dict[name] = joint
|
||||
for child in node.iterchildren():
|
||||
cls._parse_node_children(joint_dict, child, weakref.ref(joint))
|
||||
|
||||
@classmethod
|
||||
def from_xml(cls, node: etree.ElementBase):
|
||||
joint_dict = {}
|
||||
cls._parse_node_children(joint_dict, node, None)
|
||||
return cls(joint_dict)
|
||||
|
||||
def get_required_joints(self, joint_names: Collection[str]) -> Set[str]:
|
||||
"""Get all joints required to have a chain from all joints up to the root joint"""
|
||||
required = set(joint_names)
|
||||
for joint_name in joint_names:
|
||||
joint_node = self.joint_dict.get(joint_name)
|
||||
while joint_node:
|
||||
required.add(joint_node.name)
|
||||
if not joint_node.parent:
|
||||
break
|
||||
joint_node = joint_node.parent()
|
||||
return required
|
||||
|
||||
|
||||
def load_avatar_skeleton() -> Skeleton:
|
||||
skel_path = get_resource_filename("lib/base/data/avatar_skeleton.xml")
|
||||
with open(skel_path, 'r') as f:
|
||||
skel_root = etree.fromstring(f.read())
|
||||
return Skeleton.from_xml(skel_root.getchildren()[0])
|
||||
|
||||
|
||||
def _get_vec_attr(node, attr_name, default) -> Vector3:
|
||||
attr_val = node.get(attr_name, None)
|
||||
if not attr_val:
|
||||
return default
|
||||
return Vector3(*(float(x) for x in attr_val.split(" ") if x))
|
||||
|
||||
|
||||
AVATAR_SKELETON = load_avatar_skeleton()
|
||||
@@ -10,6 +10,7 @@ from io import SEEK_CUR, SEEK_SET, SEEK_END, RawIOBase, BufferedIOBase
|
||||
from typing import *
|
||||
|
||||
import lazy_object_proxy
|
||||
import numpy as np
|
||||
|
||||
import hippolyzer.lib.base.llsd as llsd
|
||||
import hippolyzer.lib.base.datatypes as dtypes
|
||||
@@ -27,6 +28,14 @@ class _Unserializable:
|
||||
return False
|
||||
|
||||
|
||||
class MissingType:
|
||||
"""Simple sentinel type like dataclasses._MISSING_TYPE"""
|
||||
pass
|
||||
|
||||
|
||||
MISSING = MissingType()
|
||||
|
||||
|
||||
UNSERIALIZABLE = _Unserializable()
|
||||
_T = TypeVar("_T")
|
||||
|
||||
@@ -288,7 +297,7 @@ class SerializableBase(abc.ABC):
|
||||
@classmethod
|
||||
def default_value(cls) -> Any:
|
||||
# None may be a valid default, so return MISSING as a sentinel val
|
||||
return dataclasses.MISSING
|
||||
return MISSING
|
||||
|
||||
|
||||
class Adapter(SerializableBase, abc.ABC):
|
||||
@@ -328,18 +337,18 @@ class ForwardSerializable(SerializableBase):
|
||||
def __init__(self, func: Callable[[], SERIALIZABLE_TYPE]):
|
||||
super().__init__()
|
||||
self._func = func
|
||||
self._wrapped = dataclasses.MISSING
|
||||
self._wrapped: Union[MissingType, SERIALIZABLE_TYPE] = MISSING
|
||||
|
||||
def _ensure_evaled(self):
|
||||
if self._wrapped is dataclasses.MISSING:
|
||||
if self._wrapped is MISSING:
|
||||
self._wrapped = self._func()
|
||||
|
||||
def __getattr__(self, attr):
|
||||
return getattr(self._wrapped, attr)
|
||||
|
||||
def default_value(self) -> Any:
|
||||
if self._wrapped is dataclasses.MISSING:
|
||||
return dataclasses.MISSING
|
||||
if self._wrapped is MISSING:
|
||||
return MISSING
|
||||
return self._wrapped.default_value()
|
||||
|
||||
def serialize(self, val, writer: BufferWriter, ctx: Optional[ParseContext]):
|
||||
@@ -357,10 +366,10 @@ class Template(SerializableBase):
|
||||
def __init__(self, template_spec: Dict[str, SERIALIZABLE_TYPE], skip_missing=False):
|
||||
self._template_spec = template_spec
|
||||
self._skip_missing = skip_missing
|
||||
self._size = dataclasses.MISSING
|
||||
self._size = MISSING
|
||||
|
||||
def calc_size(self):
|
||||
if self._size is not dataclasses.MISSING:
|
||||
if self._size is not MISSING:
|
||||
return self._size
|
||||
sum_bytes = 0
|
||||
for _, field_type in self._template_spec.items():
|
||||
@@ -830,7 +839,7 @@ class QuantizedFloat(QuantizedFloatBase):
|
||||
super().__init__(prim_spec, zero_median=False)
|
||||
self.lower = lower
|
||||
self.upper = upper
|
||||
# We know the range in `QuantizedFloat` when it's constructed, so we can infer
|
||||
# We know the range in `QuantizedFloat` when it's constructed, so we can infer
|
||||
# whether or not we should round towards zero in __init__
|
||||
max_error = (upper - lower) * self.step_mag
|
||||
midpoint = (upper + lower) / 2.0
|
||||
@@ -1196,9 +1205,9 @@ class ContextMixin(Generic[_T]):
|
||||
def _choose_option(self, ctx: Optional[ParseContext]) -> _T:
|
||||
idx = self._fun(ctx)
|
||||
if idx not in self._options:
|
||||
if dataclasses.MISSING not in self._options:
|
||||
if MISSING not in self._options:
|
||||
raise KeyError(f"{idx!r} not found in {self._options!r}")
|
||||
idx = dataclasses.MISSING
|
||||
idx = MISSING
|
||||
return self._options[idx]
|
||||
|
||||
|
||||
@@ -1442,7 +1451,7 @@ class StringEnumAdapter(Adapter):
|
||||
class FixedPoint(SerializableBase):
|
||||
def __init__(self, ser_spec, int_bits, frac_bits, signed=False):
|
||||
# Should never be used due to how this handles signs :/
|
||||
assert(not ser_spec.is_signed)
|
||||
assert (not ser_spec.is_signed)
|
||||
|
||||
self._ser_spec: SerializablePrimitive = ser_spec
|
||||
self._signed = signed
|
||||
@@ -1452,7 +1461,7 @@ class FixedPoint(SerializableBase):
|
||||
self._min_val = ((1 << int_bits) * -1) if signed else 0
|
||||
self._max_val = 1 << int_bits
|
||||
|
||||
assert(required_bits == (ser_spec.calc_size() * 8))
|
||||
assert (required_bits == (ser_spec.calc_size() * 8))
|
||||
|
||||
def deserialize(self, reader: Reader, ctx):
|
||||
fixed_val = float(self._ser_spec.deserialize(reader, ctx))
|
||||
@@ -1482,8 +1491,8 @@ def _make_undefined_raiser():
|
||||
return f
|
||||
|
||||
|
||||
def dataclass_field(spec: Union[SERIALIZABLE_TYPE, Callable], *, default=dataclasses.MISSING,
|
||||
default_factory=dataclasses.MISSING, init=True, repr=True, # noqa
|
||||
def dataclass_field(spec: Union[SERIALIZABLE_TYPE, Callable], *, default: Any = dataclasses.MISSING,
|
||||
default_factory: Any = dataclasses.MISSING, init=True, repr=True, # noqa
|
||||
hash=None, compare=True) -> dataclasses.Field: # noqa
|
||||
enrich_factory = False
|
||||
# Lambda, need to defer evaluation of spec until it's actually used.
|
||||
@@ -1504,7 +1513,7 @@ def dataclass_field(spec: Union[SERIALIZABLE_TYPE, Callable], *, default=datacla
|
||||
metadata={"spec": spec}, default=default, default_factory=default_factory, init=init,
|
||||
repr=repr, hash=hash, compare=compare
|
||||
)
|
||||
# Need to stuff this on so it knows which field went unspecified.
|
||||
# Need to stuff this on, so it knows which field went unspecified.
|
||||
if enrich_factory:
|
||||
default_factory.field = field
|
||||
return field
|
||||
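As a quick orientation for the signature change above, here is a hedged sketch of how `dataclass_field` is typically used to attach a wire spec to a dataclass field; the `U32` primitive and the import path are my assumptions, not taken from this diff.

import dataclasses

from hippolyzer.lib.base.serialization import dataclass_field, U32  # assumed import path

@dataclasses.dataclass
class ExampleHeader:
    # Serialized as an unsigned 32-bit int; `default` flows straight into dataclasses.field()
    flags: int = dataclass_field(U32, default=0)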
@@ -1602,7 +1611,9 @@ class BitfieldDataclass(DataclassAdapter):
|
||||
|
||||
|
||||
class ExprAdapter(Adapter):
|
||||
def __init__(self, child_spec: SERIALIZABLE_TYPE, decode_func: Callable, encode_func: Callable):
|
||||
_ID = lambda x: x
|
||||
|
||||
def __init__(self, child_spec: SERIALIZABLE_TYPE, decode_func: Callable = _ID, encode_func: Callable = _ID):
|
||||
super().__init__(child_spec)
|
||||
self._decode_func = decode_func
|
||||
self._encode_func = encode_func
|
||||
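A hedged illustration of what the new identity defaults buy: an `ExprAdapter` can now override only one direction. `BytesGreedy` and the import path are assumptions on my part.

from hippolyzer.lib.base.serialization import ExprAdapter, BytesGreedy  # assumed import path

# Only decode is overridden: reads come back as a hex string, while writes still accept
# plain bytes through the new identity encode default.
hex_view = ExprAdapter(BytesGreedy(), decode_func=lambda v: v.hex())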
@@ -1651,6 +1662,62 @@ class BinaryLLSD(SerializableBase):
|
||||
writer.write_bytes(llsd.format_binary(val, with_header=False))
|
||||
|
||||
|
||||
class NumPyArray(Adapter):
|
||||
"""
|
||||
A 2-dimensional, dynamic-length array of data from numpy. Greedy.
|
||||
|
||||
Unlike most other serializers, your endianness _must_ be specified in the dtype!
|
||||
"""
|
||||
__slots__ = ['dtype', 'elems']
|
||||
|
||||
def __init__(self, child_spec: Optional[SERIALIZABLE_TYPE], dtype: np.dtype, elems: int):
|
||||
super().__init__(child_spec)
|
||||
self.dtype = dtype
|
||||
self.elems = elems
|
||||
|
||||
def _pick_dtype(self, endian: str) -> np.dtype:
|
||||
return self.dtype.newbyteorder('>') if endian != "<" else self.dtype
|
||||
|
||||
def decode(self, val: Any, ctx: Optional[ParseContext], pod: bool = False) -> Any:
|
||||
num_elems = len(val) // self.dtype.itemsize
|
||||
num_ndims = num_elems // self.elems
|
||||
buf_array = np.frombuffer(val, dtype=self.dtype, count=num_elems)
|
||||
return buf_array.reshape((num_ndims, self.elems))
|
||||
|
||||
def encode(self, val, ctx: Optional[ParseContext]) -> Any:
|
||||
val: np.ndarray = np.array(val, dtype=self.dtype).flatten()
|
||||
return val.tobytes()
|
||||
|
||||
|
||||
class QuantizedNumPyArray(Adapter):
|
||||
"""Like QuantizedFloat. Only works correctly for unsigned types, no zero midpoint rounding!"""
|
||||
def __init__(self, child_spec: NumPyArray, lower: float, upper: float):
|
||||
super().__init__(child_spec)
|
||||
self.dtype = child_spec.dtype
|
||||
self.lower = lower
|
||||
self.upper = upper
|
||||
self.step_mag = 1.0 / ((2 ** (self.dtype.itemsize * 8)) - 1)
|
||||
|
||||
def encode(self, val: Any, ctx: Optional[ParseContext]) -> Any:
|
||||
val = np.array(val, dtype=np.float64)
|
||||
val = np.clip(val, self.lower, self.upper)
|
||||
delta = self.upper - self.lower
|
||||
if delta == 0.0:
|
||||
return np.zeros(val.shape, dtype=self.dtype)
|
||||
|
||||
val -= self.lower
|
||||
val /= delta
|
||||
val /= self.step_mag
|
||||
return np.rint(val).astype(self.dtype)
|
||||
|
||||
def decode(self, val: Any, ctx: Optional[ParseContext], pod: bool = False) -> Any:
|
||||
val = val.astype(np.float64)
|
||||
val *= self.step_mag
|
||||
val *= self.upper - self.lower
|
||||
val += self.lower
|
||||
return val
|
||||
|
||||
|
||||
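To make the arithmetic above concrete, a standalone sketch (mine, not part of the change) of the round trip for a little-endian uint16 array quantized over [0.0, 1.0]; note that 0.1 quantizes to 6554, the same value the NumPy serialization tests further down expect.

import numpy as np

step_mag = 1.0 / (2 ** 16 - 1)  # 1 / 65535 for a uint16 dtype
floats = np.array([[0.0, 0.1, 1.0]], dtype=np.float64)

# encode(): clamp, shift by `lower`, normalize by the range, divide by the step, round
encoded = np.rint((floats - 0.0) / (1.0 - 0.0) / step_mag).astype(np.dtype("<u2"))
# encoded == [[0, 6554, 65535]]; NumPyArray.encode() then emits the 6 raw little-endian bytes

# decode(): NumPyArray reshapes the buffer into rows of `elems` columns, then
# QuantizedNumPyArray scales back into float space
raw = encoded.tobytes()
restored = np.frombuffer(raw, dtype=np.dtype("<u2")).reshape((-1, 3)).astype(np.float64)
restored = restored * step_mag * (1.0 - 0.0) + 0.0  # ~[[0.0, 0.10001, 1.0]], off only by quantization error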
def subfield_serializer(msg_name, block_name, var_name):
|
||||
def f(orig_cls):
|
||||
global SUBFIELD_SERIALIZERS
|
||||
|
||||
@@ -5,9 +5,6 @@ Serialization templates for structures used in LLUDP and HTTP bodies.
|
||||
import abc
|
||||
import collections
|
||||
import dataclasses
|
||||
import enum
|
||||
import importlib
|
||||
import logging
|
||||
import math
|
||||
import zlib
|
||||
from typing import *
|
||||
@@ -17,11 +14,6 @@ from hippolyzer.lib.base import llsd
|
||||
from hippolyzer.lib.base.datatypes import UUID, IntEnum, IntFlag, Vector3
|
||||
from hippolyzer.lib.base.namevalue import NameValuesSerializer
|
||||
|
||||
try:
|
||||
importlib.reload(se) # type: ignore
|
||||
except:
|
||||
logging.exception("Failed to reload serialization lib")
|
||||
|
||||
|
||||
@se.enum_field_serializer("RequestXfer", "XferID", "VFileType")
|
||||
@se.enum_field_serializer("AssetUploadRequest", "AssetBlock", "Type")
|
||||
@@ -143,6 +135,7 @@ class InventoryType(IntEnum):
|
||||
lower = self.name.lower()
|
||||
return {
|
||||
"callingcard": "callcard",
|
||||
"none": "-1",
|
||||
}.get(lower, lower)
|
||||
|
||||
|
||||
@@ -359,10 +352,10 @@ class PermissionType(IntEnum):
|
||||
@se.enum_field_serializer("TransferRequest", "TransferInfo", "SourceType")
|
||||
class TransferSourceType(IntEnum):
|
||||
UNKNOWN = 0
|
||||
FILE = enum.auto()
|
||||
ASSET = enum.auto()
|
||||
SIM_INV_ITEM = enum.auto()
|
||||
SIM_ESTATE = enum.auto()
|
||||
FILE = 1
|
||||
ASSET = 2
|
||||
SIM_INV_ITEM = 3
|
||||
SIM_ESTATE = 4
|
||||
|
||||
|
||||
class EstateAssetType(IntEnum):
|
||||
@@ -425,15 +418,15 @@ class TransferParamsSerializer(se.EnumSwitchedSubfieldSerializer):
|
||||
@se.enum_field_serializer("TransferInfo", "TransferInfo", "ChannelType")
|
||||
class TransferChannelType(IntEnum):
|
||||
UNKNOWN = 0
|
||||
MISC = enum.auto()
|
||||
ASSET = enum.auto()
|
||||
MISC = 1
|
||||
ASSET = 2
|
||||
|
||||
|
||||
@se.enum_field_serializer("TransferInfo", "TransferInfo", "TargetType")
|
||||
class TransferTargetType(IntEnum):
|
||||
UNKNOWN = 0
|
||||
FILE = enum.auto()
|
||||
VFILE = enum.auto()
|
||||
FILE = 1
|
||||
VFILE = 2
|
||||
|
||||
|
||||
@se.enum_field_serializer("TransferInfo", "TransferInfo", "Status")
|
||||
@@ -540,45 +533,45 @@ class SendXferPacketIDSerializer(se.AdapterSubfieldSerializer):
|
||||
@se.enum_field_serializer("ViewerEffect", "Effect", "Type")
|
||||
class ViewerEffectType(IntEnum):
|
||||
TEXT = 0
|
||||
ICON = enum.auto()
|
||||
CONNECTOR = enum.auto()
|
||||
FLEXIBLE_OBJECT = enum.auto()
|
||||
ANIMAL_CONTROLS = enum.auto()
|
||||
LOCAL_ANIMATION_OBJECT = enum.auto()
|
||||
CLOTH = enum.auto()
|
||||
EFFECT_BEAM = enum.auto()
|
||||
EFFECT_GLOW = enum.auto()
|
||||
EFFECT_POINT = enum.auto()
|
||||
EFFECT_TRAIL = enum.auto()
|
||||
EFFECT_SPHERE = enum.auto()
|
||||
EFFECT_SPIRAL = enum.auto()
|
||||
EFFECT_EDIT = enum.auto()
|
||||
EFFECT_LOOKAT = enum.auto()
|
||||
EFFECT_POINTAT = enum.auto()
|
||||
EFFECT_VOICE_VISUALIZER = enum.auto()
|
||||
NAME_TAG = enum.auto()
|
||||
EFFECT_BLOB = enum.auto()
|
||||
ICON = 1
|
||||
CONNECTOR = 2
|
||||
FLEXIBLE_OBJECT = 3
|
||||
ANIMAL_CONTROLS = 4
|
||||
LOCAL_ANIMATION_OBJECT = 5
|
||||
CLOTH = 6
|
||||
EFFECT_BEAM = 7
|
||||
EFFECT_GLOW = 8
|
||||
EFFECT_POINT = 9
|
||||
EFFECT_TRAIL = 10
|
||||
EFFECT_SPHERE = 11
|
||||
EFFECT_SPIRAL = 12
|
||||
EFFECT_EDIT = 13
|
||||
EFFECT_LOOKAT = 14
|
||||
EFFECT_POINTAT = 15
|
||||
EFFECT_VOICE_VISUALIZER = 16
|
||||
NAME_TAG = 17
|
||||
EFFECT_BLOB = 18
|
||||
|
||||
|
||||
class LookAtTarget(IntEnum):
|
||||
NONE = 0
|
||||
IDLE = enum.auto()
|
||||
AUTO_LISTEN = enum.auto()
|
||||
FREELOOK = enum.auto()
|
||||
RESPOND = enum.auto()
|
||||
HOVER = enum.auto()
|
||||
CONVERSATION = enum.auto()
|
||||
SELECT = enum.auto()
|
||||
FOCUS = enum.auto()
|
||||
MOUSELOOK = enum.auto()
|
||||
CLEAR = enum.auto()
|
||||
IDLE = 1
|
||||
AUTO_LISTEN = 2
|
||||
FREELOOK = 3
|
||||
RESPOND = 4
|
||||
HOVER = 5
|
||||
CONVERSATION = 6
|
||||
SELECT = 7
|
||||
FOCUS = 8
|
||||
MOUSELOOK = 9
|
||||
CLEAR = 10
|
||||
|
||||
|
||||
class PointAtTarget(IntEnum):
|
||||
NONE = 0
|
||||
SELECT = enum.auto()
|
||||
GRAB = enum.auto()
|
||||
CLEAR = enum.auto()
|
||||
SELECT = 1
|
||||
GRAB = 2
|
||||
CLEAR = 3
|
||||
|
||||
|
||||
@se.subfield_serializer("ViewerEffect", "Effect", "TypeData")
|
||||
@@ -943,7 +936,7 @@ class ObjectStateAdapter(se.ContextAdapter):
|
||||
PCode.AVATAR: se.IntFlag(AgentState),
|
||||
PCode.PRIMITIVE: AttachmentStateAdapter(None),
|
||||
# Other cases are probably just a number (tree species ID or something.)
|
||||
dataclasses.MISSING: se.IdentityAdapter(),
|
||||
se.MISSING: se.IdentityAdapter(),
|
||||
}
|
||||
)
|
||||
|
||||
@@ -1146,9 +1139,15 @@ class TEExceptionField(se.SerializableBase):
|
||||
return dict
|
||||
|
||||
|
||||
_T = TypeVar("_T")
|
||||
_TE_FIELD_KEY = Optional[Sequence[int]]
|
||||
_TE_DICT = Dict[_TE_FIELD_KEY, _T]
|
||||
|
||||
|
||||
def _te_field(spec: se.SERIALIZABLE_TYPE, first=False, optional=False,
|
||||
default_factory=dataclasses.MISSING, default=dataclasses.MISSING):
|
||||
if default_factory is not dataclasses.MISSING:
|
||||
default_factory: Union[se.MissingType, Callable[[], _T]] = se.MISSING,
|
||||
default: Union[se.MissingType, _T] = se.MISSING):
|
||||
if default_factory is not se.MISSING:
|
||||
new_default_factory = lambda: {None: default_factory()}
|
||||
elif default is not None:
|
||||
new_default_factory = lambda: {None: default}
|
||||
@@ -1160,9 +1159,6 @@ def _te_field(spec: se.SERIALIZABLE_TYPE, first=False, optional=False,
|
||||
)
|
||||
|
||||
|
||||
_T = TypeVar("_T")
|
||||
_TE_FIELD_KEY = Optional[Sequence[int]]
|
||||
|
||||
# If this seems weird it's because it is. TE offsets are S16s with `0` as the actual 0
|
||||
# point, and LL divides by `0x7FFF` to convert back to float. Negative S16s can
|
||||
# actually go to -0x8000 due to two's complement, creating a larger range for negatives.
|
||||
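A tiny standalone sketch (mine) of what that comment means in practice; the struct packing and byte order here are illustrative, only the divide-by-0x7FFF convention comes from the comment above.

import struct

def te_offset_to_float(raw: bytes) -> float:
    (val,) = struct.unpack("<h", raw)  # signed 16-bit
    return val / 0x7FFF

te_offset_to_float(struct.pack("<h", 0x7FFF))   # 1.0 exactly
te_offset_to_float(struct.pack("<h", -0x8000))  # about -1.00003: negatives get a slightly larger range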
@@ -1221,22 +1217,22 @@ MAX_TES = 45
|
||||
|
||||
@dataclasses.dataclass
|
||||
class TextureEntryCollection:
|
||||
Textures: Dict[_TE_FIELD_KEY, UUID] = _te_field(
|
||||
Textures: _TE_DICT[UUID] = _te_field(
|
||||
# Plywood texture
|
||||
se.UUID, first=True, default=UUID('89556747-24cb-43ed-920b-47caed15465f'))
|
||||
# Bytes are inverted so fully opaque white is \x00\x00\x00\x00
|
||||
Color: Dict[_TE_FIELD_KEY, bytes] = _te_field(Color4(invert_bytes=True), default=b"\xff\xff\xff\xff")
|
||||
ScalesS: Dict[_TE_FIELD_KEY, float] = _te_field(se.F32, default=1.0)
|
||||
ScalesT: Dict[_TE_FIELD_KEY, float] = _te_field(se.F32, default=1.0)
|
||||
OffsetsS: Dict[_TE_FIELD_KEY, float] = _te_field(TE_S16_COORD, default=0.0)
|
||||
OffsetsT: Dict[_TE_FIELD_KEY, float] = _te_field(TE_S16_COORD, default=0.0)
|
||||
Rotation: Dict[_TE_FIELD_KEY, float] = _te_field(PackedTERotation(), default=0.0)
|
||||
BasicMaterials: Dict[_TE_FIELD_KEY, "BasicMaterials"] = _te_field(
|
||||
Color: _TE_DICT[bytes] = _te_field(Color4(invert_bytes=True), default=b"\xff\xff\xff\xff")
|
||||
ScalesS: _TE_DICT[float] = _te_field(se.F32, default=1.0)
|
||||
ScalesT: _TE_DICT[float] = _te_field(se.F32, default=1.0)
|
||||
OffsetsS: _TE_DICT[float] = _te_field(TE_S16_COORD, default=0.0)
|
||||
OffsetsT: _TE_DICT[float] = _te_field(TE_S16_COORD, default=0.0)
|
||||
Rotation: _TE_DICT[float] = _te_field(PackedTERotation(), default=0.0)
|
||||
BasicMaterials: _TE_DICT["BasicMaterials"] = _te_field(
|
||||
BUMP_SHINY_FULLBRIGHT, default_factory=BasicMaterials,
|
||||
)
|
||||
MediaFlags: Dict[_TE_FIELD_KEY, "MediaFlags"] = _te_field(MEDIA_FLAGS, default_factory=MediaFlags)
|
||||
Glow: Dict[_TE_FIELD_KEY, float] = _te_field(se.QuantizedFloat(se.U8, 0.0, 1.0), default=0.0)
|
||||
Materials: Dict[_TE_FIELD_KEY, UUID] = _te_field(se.UUID, optional=True, default=UUID.ZERO)
|
||||
MediaFlags: _TE_DICT["MediaFlags"] = _te_field(MEDIA_FLAGS, default_factory=MediaFlags)
|
||||
Glow: _TE_DICT[float] = _te_field(se.QuantizedFloat(se.U8, 0.0, 1.0), default=0.0)
|
||||
Materials: _TE_DICT[UUID] = _te_field(se.UUID, optional=True, default=UUID.ZERO)
|
||||
|
||||
def unwrap(self):
|
||||
"""Return `self` regardless of whether this is lazy wrapped object or not"""
|
||||
@@ -1733,28 +1729,28 @@ class NameValueSerializer(se.SimpleSubfieldSerializer):
|
||||
@se.enum_field_serializer("SetFollowCamProperties", "CameraProperty", "Type")
|
||||
class CameraPropertyType(IntEnum):
|
||||
PITCH = 0
|
||||
FOCUS_OFFSET = enum.auto()
|
||||
FOCUS_OFFSET_X = enum.auto()
|
||||
FOCUS_OFFSET_Y = enum.auto()
|
||||
FOCUS_OFFSET_Z = enum.auto()
|
||||
POSITION_LAG = enum.auto()
|
||||
FOCUS_LAG = enum.auto()
|
||||
DISTANCE = enum.auto()
|
||||
BEHINDNESS_ANGLE = enum.auto()
|
||||
BEHINDNESS_LAG = enum.auto()
|
||||
POSITION_THRESHOLD = enum.auto()
|
||||
FOCUS_THRESHOLD = enum.auto()
|
||||
ACTIVE = enum.auto()
|
||||
POSITION = enum.auto()
|
||||
POSITION_X = enum.auto()
|
||||
POSITION_Y = enum.auto()
|
||||
POSITION_Z = enum.auto()
|
||||
FOCUS = enum.auto()
|
||||
FOCUS_X = enum.auto()
|
||||
FOCUS_Y = enum.auto()
|
||||
FOCUS_Z = enum.auto()
|
||||
POSITION_LOCKED = enum.auto()
|
||||
FOCUS_LOCKED = enum.auto()
|
||||
FOCUS_OFFSET = 1
|
||||
FOCUS_OFFSET_X = 2
|
||||
FOCUS_OFFSET_Y = 3
|
||||
FOCUS_OFFSET_Z = 4
|
||||
POSITION_LAG = 5
|
||||
FOCUS_LAG = 6
|
||||
DISTANCE = 7
|
||||
BEHINDNESS_ANGLE = 8
|
||||
BEHINDNESS_LAG = 9
|
||||
POSITION_THRESHOLD = 10
|
||||
FOCUS_THRESHOLD = 11
|
||||
ACTIVE = 12
|
||||
POSITION = 13
|
||||
POSITION_X = 14
|
||||
POSITION_Y = 15
|
||||
POSITION_Z = 16
|
||||
FOCUS = 17
|
||||
FOCUS_X = 18
|
||||
FOCUS_Y = 19
|
||||
FOCUS_Z = 20
|
||||
POSITION_LOCKED = 21
|
||||
FOCUS_LOCKED = 22
|
||||
|
||||
|
||||
@se.enum_field_serializer("DeRezObject", "AgentBlock", "Destination")
|
||||
@@ -1869,30 +1865,33 @@ class GroupPowerFlags(IntFlag):
|
||||
# Roles
|
||||
ROLE_CREATE = 1 << 4 # Create new roles
|
||||
ROLE_DELETE = 1 << 5 # Delete roles
|
||||
ROLE_PROPERTIES = 1 << 6 # Change Role Names, Titles, and Descriptions (Of roles the user is in, only, or any role in group?)
|
||||
ROLE_PROPERTIES = 1 << 6 # Change Role Names, Titles, and Descriptions
|
||||
ROLE_ASSIGN_MEMBER_LIMITED = 1 << 7 # Assign Member to a Role that the assigner is in
|
||||
ROLE_ASSIGN_MEMBER = 1 << 8 # Assign Member to Role
|
||||
ROLE_REMOVE_MEMBER = 1 << 9 # Remove Member from Role
|
||||
ROLE_CHANGE_ACTIONS = 1 << 10 # Change actions a role can perform
|
||||
|
||||
# Group Identity
|
||||
GROUP_CHANGE_IDENTITY = 1 << 11 # Charter, insignia, 'Show In Group List', 'Publish on the web', 'Mature', all 'Show Member In Group Profile' checkboxes
|
||||
GROUP_CHANGE_IDENTITY = 1 << 11 # Charter, insignia, 'Show In Group List', 'Publish on the web', 'Mature', etc.
|
||||
|
||||
# Parcel Management
|
||||
LAND_DEED = 1 << 12 # Deed Land and Buy Land for Group
|
||||
LAND_RELEASE = 1 << 13 # Release Land (to Gov. Linden)
|
||||
LAND_SET_SALE_INFO = 1 << 14 # Set for sale info (Toggle "For Sale", Set Price, Set Target, Toggle "Sell objects with the land")
|
||||
# Set for sale info (Toggle "For Sale", Set Price, Set Target, Toggle "Sell objects with the land")
|
||||
LAND_SET_SALE_INFO = 1 << 14
|
||||
LAND_DIVIDE_JOIN = 1 << 15 # Divide and Join Parcels
|
||||
|
||||
# Parcel Identity
|
||||
LAND_FIND_PLACES = 1 << 17 # Toggle "Show in Find Places" and Set Category.
|
||||
LAND_CHANGE_IDENTITY = 1 << 18 # Change Parcel Identity: Parcel Name, Parcel Description, Snapshot, 'Publish on the web', and 'Mature' checkbox
|
||||
# Change Parcel Identity: Parcel Name, Parcel Description, Snapshot, 'Publish on the web', and 'Mature' checkbox
|
||||
LAND_CHANGE_IDENTITY = 1 << 18
|
||||
LAND_SET_LANDING_POINT = 1 << 19 # Set Landing Point
|
||||
|
||||
# Parcel Settings
|
||||
LAND_CHANGE_MEDIA = 1 << 20 # Change Media Settings
|
||||
LAND_EDIT = 1 << 21 # Toggle Edit Land
|
||||
LAND_OPTIONS = 1 << 22 # Toggle Set Home Point, Fly, Outside Scripts, Create/Edit Objects, Landmark, and Damage checkboxes
|
||||
# Toggle Set Home Point, Fly, Outside Scripts, Create/Edit Objects, Landmark, and Damage checkboxes
|
||||
LAND_OPTIONS = 1 << 22
|
||||
|
||||
# Parcel Powers
|
||||
LAND_ALLOW_EDIT_LAND = 1 << 23 # Bypass Edit Land Restriction
|
||||
|
||||
@@ -35,9 +35,8 @@ class VisualParam:
|
||||
|
||||
|
||||
class VisualParams(List[VisualParam]):
|
||||
def __init__(self):
|
||||
def __init__(self, lad_path):
|
||||
super().__init__()
|
||||
lad_path = get_resource_filename("lib/base/data/avatar_lad.xml")
|
||||
with open(lad_path, "rb") as f:
|
||||
doc = parse_etree(f)
|
||||
for param in doc.findall(".//param"):
|
||||
@@ -59,8 +58,11 @@ class VisualParams(List[VisualParam]):
|
||||
def by_wearable(self, wearable: str) -> List[VisualParam]:
|
||||
return [x for x in self if x.wearable == wearable]
|
||||
|
||||
def by_id(self, vparam_id: int) -> VisualParam:
|
||||
return [x for x in self if x.id == vparam_id][0]
|
||||
|
||||
VISUAL_PARAMS = VisualParams()
|
||||
|
||||
VISUAL_PARAMS = VisualParams(get_resource_filename("lib/base/data/avatar_lad.xml"))
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
|
||||
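Hedged usage note, not from the diff: with the constructor now taking the LAD path, an alternate avatar_lad.xml can be loaded without touching the module-level singleton. The import path and wearable name are my assumptions.

from hippolyzer.lib.base.visualparams import VisualParams  # assumed import path

params = VisualParams("/tmp/custom_avatar_lad.xml")  # illustrative path
shape_params = params.by_wearable("shape")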
@@ -1,4 +1,4 @@
|
||||
from typing import NamedTuple, Union, Optional
|
||||
from typing import NamedTuple, Union, Optional, List
|
||||
|
||||
import hippolyzer.lib.base.serialization as se
|
||||
from hippolyzer.lib.base import llsd
|
||||
@@ -18,6 +18,11 @@ class UploadToken(NamedTuple):
|
||||
payload: bytes
|
||||
|
||||
|
||||
class MeshUploadDetails(NamedTuple):
|
||||
mesh_bytes: bytes
|
||||
num_faces: int
|
||||
|
||||
|
||||
class AssetUploader:
|
||||
def __init__(self, region: BaseClientRegion):
|
||||
self._region = region
|
||||
@@ -69,20 +74,15 @@ class AssetUploader:
|
||||
"""
|
||||
pass
|
||||
|
||||
# The mesh upload flow is a little special, so it gets its own methods
|
||||
async def initiate_mesh_upload(self, name: str, mesh: Union[bytes, MeshAsset],
|
||||
# The mesh upload flow is a little special, so it gets its own method
|
||||
async def initiate_mesh_upload(self, name: str, mesh: Union[MeshUploadDetails, MeshAsset],
|
||||
flags: Optional[int] = None) -> UploadToken:
|
||||
"""
|
||||
Very basic LL-serialized mesh uploader
|
||||
|
||||
Currently only handles a single mesh with a single face and no associated textures.
|
||||
"""
|
||||
if isinstance(mesh, MeshAsset):
|
||||
writer = se.BufferWriter("!")
|
||||
writer.write(LLMeshSerializer(), mesh)
|
||||
mesh = writer.copy_buffer()
|
||||
mesh = MeshUploadDetails(writer.copy_buffer(), len(mesh.segments['high_lod']))
|
||||
|
||||
asset_resources = self._build_asset_resources(name, mesh)
|
||||
asset_resources = self._build_asset_resources(name, [mesh])
|
||||
payload = {
|
||||
'asset_resources': asset_resources,
|
||||
'asset_type': 'mesh',
|
||||
@@ -102,26 +102,26 @@ class AssetUploader:
|
||||
upload_body = llsd.format_xml(asset_resources)
|
||||
return UploadToken(resp_payload["upload_price"], resp_payload["uploader"], upload_body)
|
||||
|
||||
def _build_asset_resources(self, name: str, mesh: bytes) -> dict:
|
||||
def _build_asset_resources(self, name: str, meshes: List[MeshUploadDetails]) -> dict:
|
||||
instances = []
|
||||
for mesh in meshes:
|
||||
instances.append({
|
||||
'face_list': [{
|
||||
'diffuse_color': [1.0, 1.0, 1.0, 1.0],
|
||||
'fullbright': False
|
||||
}] * mesh.num_faces,
|
||||
'material': 3,
|
||||
'mesh': 0,
|
||||
'mesh_name': name,
|
||||
'physics_shape_type': 2,
|
||||
'position': [0.0, 0.0, 0.0],
|
||||
'rotation': [0.7071067690849304, 0.0, 0.0, 0.7071067690849304],
|
||||
'scale': [1.0, 1.0, 1.0]
|
||||
})
|
||||
|
||||
return {
|
||||
'instance_list': [
|
||||
{
|
||||
'face_list': [
|
||||
{
|
||||
'diffuse_color': [1.0, 1.0, 1.0, 1.0],
|
||||
'fullbright': False
|
||||
}
|
||||
],
|
||||
'material': 3,
|
||||
'mesh': 0,
|
||||
'mesh_name': name,
|
||||
'physics_shape_type': 2,
|
||||
'position': [0.0, 0.0, 0.0],
|
||||
'rotation': [0.7071067690849304, 0.0, 0.0, 0.7071067690849304],
|
||||
'scale': [1.0, 1.0, 1.0]
|
||||
}
|
||||
],
|
||||
'mesh_list': [mesh],
|
||||
'instance_list': instances,
|
||||
'mesh_list': [mesh.mesh_bytes for mesh in meshes],
|
||||
'metric': 'MUT_Unspecified',
|
||||
'texture_list': []
|
||||
}
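A hedged sketch of the new calling convention; the function and variable names are illustrative, but the shapes match `MeshUploadDetails` and `initiate_mesh_upload` above.

async def upload_example(uploader: AssetUploader, slm_bytes: bytes) -> UploadToken:
    # num_faces should match the high LOD submesh count of the serialized mesh,
    # since it drives how many face_list entries _build_asset_resources emits
    details = MeshUploadDetails(mesh_bytes=slm_bytes, num_faces=2)
    return await uploader.initiate_mesh_upload("example mesh", details, flags=None)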
|
||||
|
||||
192 hippolyzer/lib/client/inventory_manager.py (new file)
@@ -0,0 +1,192 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import gzip
|
||||
import logging
|
||||
import secrets
|
||||
from pathlib import Path
|
||||
from typing import Union, List, Tuple, Set
|
||||
|
||||
from hippolyzer.lib.base import llsd
|
||||
from hippolyzer.lib.base.datatypes import UUID
|
||||
from hippolyzer.lib.base.inventory import InventoryModel, InventoryCategory, InventoryItem
|
||||
from hippolyzer.lib.base.message.message import Block
|
||||
from hippolyzer.lib.client.state import BaseClientSession
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class InventoryManager:
|
||||
def __init__(self, session: BaseClientSession):
|
||||
self._session = session
|
||||
self.model: InventoryModel = InventoryModel()
|
||||
self._load_skeleton()
|
||||
|
||||
def _load_skeleton(self):
|
||||
assert not self.model.nodes
|
||||
skel_cats: List[dict] = self._session.login_data.get('inventory-skeleton', [])
|
||||
for skel_cat in skel_cats:
|
||||
self.model.add(InventoryCategory(
|
||||
name=skel_cat["name"],
|
||||
cat_id=UUID(skel_cat["folder_id"]),
|
||||
parent_id=UUID(skel_cat["parent_id"]),
|
||||
# Don't use the version from the skeleton, this flags the inventory as needing
|
||||
# completion from the inventory cache. This matches indra's behavior.
|
||||
version=InventoryCategory.VERSION_NONE,
|
||||
type="category",
|
||||
pref_type=skel_cat.get("type_default", -1),
|
||||
owner_id=self._session.agent_id,
|
||||
))
|
||||
|
||||
def load_cache(self, path: Union[str, Path]):
|
||||
# Per indra, rough flow for loading inv on login is:
|
||||
# 1. Look at inventory skeleton from login response
|
||||
# 2. Pre-populate model with categories from the skeleton, including their versions
|
||||
# 3. Read the inventory cache, tracking categories and items separately
|
||||
# 4. Walk the list of categories in our cache. If the cat exists in the skeleton and the versions
|
||||
# match, then we may load the category and its descendants from cache.
|
||||
# 5. Any categories in the skeleton but not in the cache, or those with mismatched versions must be fetched.
|
||||
# The viewer does this by setting the local version of the cats to -1 and forcing a descendent fetch
|
||||
# over AIS.
|
||||
#
|
||||
# By the time you call this function, you should have already loaded the inventory skeleton
# into the model and set its inventory category versions to VERSION_NONE.
|
||||
|
||||
skel_cats: List[dict] = self._session.login_data['inventory-skeleton']
|
||||
# UUID -> version map for inventory skeleton
|
||||
skel_versions = {UUID(cat["folder_id"]): cat["version"] for cat in skel_cats}
|
||||
LOG.info(f"Parsing inv cache at {path}")
|
||||
cached_categories, cached_items = self._parse_cache(path)
|
||||
LOG.info(f"Done parsing inv cache at {path}")
|
||||
loaded_cat_ids: Set[UUID] = set()
|
||||
|
||||
for cached_cat in cached_categories:
|
||||
existing_cat: InventoryCategory = self.model.get(cached_cat.cat_id) # noqa
|
||||
# Don't clobber an existing cat unless it just has a placeholder version,
|
||||
# maybe from loading the skeleton?
|
||||
if existing_cat and existing_cat.version != InventoryCategory.VERSION_NONE:
|
||||
continue
|
||||
# Cached cat isn't the same as what the inv server says it should be, can't use it.
|
||||
if cached_cat.version != skel_versions.get(cached_cat.cat_id):
|
||||
continue
|
||||
if existing_cat:
|
||||
# Remove the category so that we can replace it, but leave any children in place
|
||||
self.model.unlink(existing_cat, single_only=True)
|
||||
self.model.add(cached_cat)
|
||||
# Any items in this category in our cache file are usable and should be added
|
||||
loaded_cat_ids.add(cached_cat.cat_id)
|
||||
|
||||
for cached_item in cached_items:
|
||||
# The skeleton doesn't have any items, so if we run into any items they should be exactly the
|
||||
# same as what we're trying to add. No point clobbering.
|
||||
if cached_item.item_id in self.model:
|
||||
continue
|
||||
# The parent category didn't have a cache hit against the inventory skeleton, can't add!
|
||||
if cached_item.parent_id not in loaded_cat_ids:
|
||||
continue
|
||||
self.model.add(cached_item)
|
||||
|
||||
def _parse_cache(self, path: Union[str, Path]) -> Tuple[List[InventoryCategory], List[InventoryItem]]:
|
||||
categories: List[InventoryCategory] = []
|
||||
items: List[InventoryItem] = []
|
||||
# Parse our cached items and categories out of the compressed inventory cache
|
||||
first_line = True
|
||||
with gzip.open(path, "rb") as f:
|
||||
# Line-delimited LLSD notation!
|
||||
for line in f.readlines():
|
||||
# TODO: Parsing of invcache is dominated by `parse_notation()`. It's stupidly inefficient.
|
||||
node_llsd = llsd.parse_notation(line)
|
||||
if first_line:
|
||||
# First line is the file header
|
||||
first_line = False
|
||||
if node_llsd['inv_cache_version'] != 2:
|
||||
raise ValueError(f"Unknown cache version: {node_llsd!r}")
|
||||
continue
|
||||
|
||||
if InventoryCategory.ID_ATTR in node_llsd:
|
||||
if (cat_node := InventoryCategory.from_llsd(node_llsd)) is not None:
|
||||
categories.append(cat_node)
|
||||
elif InventoryItem.ID_ATTR in node_llsd:
|
||||
if (item_node := InventoryItem.from_llsd(node_llsd)) is not None:
|
||||
items.append(item_node)
|
||||
else:
|
||||
LOG.warning(f"Unknown node type in inv cache: {node_llsd!r}")
|
||||
return categories, items
|
||||
|
||||
|
||||
# Thankfully we have 9 billion different ways to represent inventory data.
|
||||
def ais_item_to_inventory_data(ais_item: dict) -> Block:
|
||||
return Block(
|
||||
"InventoryData",
|
||||
ItemID=ais_item["item_id"],
|
||||
FolderID=ais_item["parent_id"],
|
||||
CallbackID=0,
|
||||
CreatorID=ais_item["permissions"]["creator_id"],
|
||||
OwnerID=ais_item["permissions"]["owner_id"],
|
||||
GroupID=ais_item["permissions"]["group_id"],
|
||||
BaseMask=ais_item["permissions"]["base_mask"],
|
||||
OwnerMask=ais_item["permissions"]["owner_mask"],
|
||||
GroupMask=ais_item["permissions"]["group_mask"],
|
||||
EveryoneMask=ais_item["permissions"]["everyone_mask"],
|
||||
NextOwnerMask=ais_item["permissions"]["next_owner_mask"],
|
||||
GroupOwned=0,
|
||||
AssetID=ais_item["asset_id"],
|
||||
Type=ais_item["type"],
|
||||
InvType=ais_item["inv_type"],
|
||||
Flags=ais_item["flags"],
|
||||
SaleType=ais_item["sale_info"]["sale_type"],
|
||||
SalePrice=ais_item["sale_info"]["sale_price"],
|
||||
Name=ais_item["name"],
|
||||
Description=ais_item["desc"],
|
||||
CreationDate=ais_item["created_at"],
|
||||
# Meaningless here
|
||||
CRC=secrets.randbits(32),
|
||||
)
|
||||
|
||||
|
||||
def inventory_data_to_ais_item(inventory_data: Block) -> dict:
|
||||
return dict(
|
||||
item_id=inventory_data["ItemID"],
|
||||
parent_id=inventory_data["FolderID"],  # InventoryData blocks carry FolderID, not ParentID
|
||||
permissions=dict(
|
||||
creator_id=inventory_data["CreatorID"],
|
||||
owner_id=inventory_data["OwnerID"],
|
||||
group_id=inventory_data["GroupID"],
|
||||
base_mask=inventory_data["BaseMask"],
|
||||
owner_mask=inventory_data["OwnerMask"],
|
||||
group_mask=inventory_data["GroupMask"],
|
||||
everyone_mask=inventory_data["EveryoneMask"],
|
||||
next_owner_mask=inventory_data["NextOwnerMask"],
|
||||
),
|
||||
asset_id=inventory_data["AssetID"],
|
||||
type=inventory_data["Type"],
|
||||
inv_type=inventory_data["InvType"],
|
||||
flags=inventory_data["Flags"],
|
||||
sale_info=dict(
|
||||
sale_type=inventory_data["SaleType"],
|
||||
sale_price=inventory_data["SalePrice"],
|
||||
),
|
||||
name=inventory_data["Name"],
|
||||
# Key names mirror what ais_item_to_inventory_data consumes
desc=inventory_data["Description"],
created_at=inventory_data["CreationDate"],
|
||||
)
|
||||
|
||||
|
||||
def ais_folder_to_inventory_data(ais_folder: dict) -> Block:
|
||||
return Block(
|
||||
"FolderData",
|
||||
FolderID=ais_folder["cat_id"],
|
||||
ParentID=ais_folder["parent_id"],
|
||||
CallbackID=0,
|
||||
Type=ais_folder["preferred_type"],
|
||||
Name=ais_folder["name"],
|
||||
)
|
||||
|
||||
|
||||
def inventory_data_to_ais_folder(inventory_data: Block) -> dict:
|
||||
return dict(
|
||||
cat_id=inventory_data["FolderID"],
|
||||
parent_id=inventory_data["ParentID"],
|
||||
preferred_type=inventory_data["Type"],
|
||||
name=inventory_data["Name"],
|
||||
)
|
||||
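Hedged usage sketch: the manager seeds itself from the login skeleton, and a viewer's gzipped cache can then be layered on top. The cache directory below is illustrative; only the `<agent_id>.inv.llsd.gz` naming is taken from the proxy subclass further down.

from pathlib import Path

inv = InventoryManager(session)  # skeleton categories land with VERSION_NONE versions
cache_file = Path("/path/to/viewer/cache") / f"{session.agent_id}.inv.llsd.gz"
if cache_file.exists():
    inv.load_cache(cache_file)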
@@ -36,3 +36,4 @@ class BaseClientSession(abc.ABC):
|
||||
region_by_handle: Callable[[int], Optional[BaseClientRegion]]
|
||||
region_by_circuit_addr: Callable[[ADDR_TUPLE], Optional[BaseClientRegion]]
|
||||
objects: ClientWorldObjectManager
|
||||
login_data: Dict[str, Any]
|
||||
|
||||
@@ -7,7 +7,6 @@ import copy
|
||||
import dataclasses
|
||||
import multiprocessing
|
||||
import pickle
|
||||
import secrets
|
||||
import warnings
|
||||
|
||||
from hippolyzer.lib.base.datatypes import UUID, Vector3
|
||||
@@ -103,46 +102,6 @@ def send_chat(message: Union[bytes, str], channel=0, chat_type=ChatType.NORMAL,
|
||||
))
|
||||
|
||||
|
||||
def ais_item_to_inventory_data(ais_item: dict):
|
||||
return Block(
|
||||
"InventoryData",
|
||||
ItemID=ais_item["item_id"],
|
||||
FolderID=ais_item["parent_id"],
|
||||
CallbackID=0,
|
||||
CreatorID=ais_item["permissions"]["creator_id"],
|
||||
OwnerID=ais_item["permissions"]["owner_id"],
|
||||
GroupID=ais_item["permissions"]["group_id"],
|
||||
BaseMask=ais_item["permissions"]["base_mask"],
|
||||
OwnerMask=ais_item["permissions"]["owner_mask"],
|
||||
GroupMask=ais_item["permissions"]["group_mask"],
|
||||
EveryoneMask=ais_item["permissions"]["everyone_mask"],
|
||||
NextOwnerMask=ais_item["permissions"]["next_owner_mask"],
|
||||
GroupOwned=0,
|
||||
AssetID=ais_item["asset_id"],
|
||||
Type=ais_item["type"],
|
||||
InvType=ais_item["inv_type"],
|
||||
Flags=ais_item["flags"],
|
||||
SaleType=ais_item["sale_info"]["sale_type"],
|
||||
SalePrice=ais_item["sale_info"]["sale_price"],
|
||||
Name=ais_item["name"],
|
||||
Description=ais_item["desc"],
|
||||
CreationDate=ais_item["created_at"],
|
||||
# Meaningless here
|
||||
CRC=secrets.randbits(32),
|
||||
)
|
||||
|
||||
|
||||
def ais_folder_to_inventory_data(ais_folder: dict):
|
||||
return Block(
|
||||
"FolderData",
|
||||
FolderID=ais_folder["cat_id"],
|
||||
ParentID=ais_folder["parent_id"],
|
||||
CallbackID=0,
|
||||
Type=ais_folder["preferred_type"],
|
||||
Name=ais_folder["name"],
|
||||
)
|
||||
|
||||
|
||||
class MetaBaseAddon(abc.ABCMeta):
|
||||
"""
|
||||
Metaclass for BaseAddon that prevents class member assignments from clobbering descriptors
|
||||
|
||||
@@ -199,9 +199,9 @@ class AddonManager:
|
||||
@classmethod
|
||||
def _check_hotreloads(cls):
|
||||
"""Mark addons that rely on changed files for reloading"""
|
||||
for filename, importers in cls.HOTRELOAD_IMPORTERS.items():
|
||||
mtime = get_mtime(filename)
|
||||
if not mtime or mtime == cls.FILE_MTIMES.get(filename, None):
|
||||
for file_path, importers in cls.HOTRELOAD_IMPORTERS.items():
|
||||
mtime = get_mtime(file_path)
|
||||
if not mtime or mtime == cls.FILE_MTIMES.get(file_path, None):
|
||||
continue
|
||||
|
||||
# Mark anything that imported this as dirty too, handling circular
|
||||
@@ -220,10 +220,15 @@ class AddonManager:
|
||||
|
||||
_dirty_importers(importers)
|
||||
|
||||
if file_path not in cls.BASE_ADDON_SPECS:
|
||||
# Make sure we won't reload importers in a loop if this is actually something
|
||||
# that was dynamically imported, where `hot_reload()` might not be called again!
|
||||
cls.FILE_MTIMES[file_path] = mtime
|
||||
|
||||
@classmethod
|
||||
def hot_reload(cls, mod: Any, require_addons_loaded=False):
|
||||
# Solely to trick the type checker because ModuleType doesn't apply where it should
|
||||
# and Protocols aren't well supported yet.
|
||||
# and Protocols aren't well-supported yet.
|
||||
imported_mod: ModuleType = mod
|
||||
imported_file = imported_mod.__file__
|
||||
# Mark the caller as having imported (and being dependent on) `module`
|
||||
@@ -432,22 +437,34 @@ class AddonManager:
|
||||
chat_type: int = message["ChatData"]["ChatType"]
|
||||
# RLV-style OwnerSay?
|
||||
if chat and chat.startswith("@") and chat_type == 8:
|
||||
# RLV-style command, `@<cmd>(:<option1>;<option2>)?(=<param>)?`
|
||||
options, _, param = chat.rpartition("=")
|
||||
cmd, _, options = options.lstrip("@").partition(":")
|
||||
options = options.split(";")
|
||||
source = message["ChatData"]["SourceID"]
|
||||
try:
|
||||
with addon_ctx.push(session, region):
|
||||
handled = cls._call_all_addon_hooks("handle_rlv_command",
|
||||
session, region, source, cmd, options, param)
|
||||
if handled:
|
||||
region.circuit.drop_message(message)
|
||||
return True
|
||||
except:
|
||||
LOG.exception(f"Failed while handling command {chat!r}")
|
||||
if not cls._SWALLOW_ADDON_EXCEPTIONS:
|
||||
raise
|
||||
# RLV allows putting multiple commands into one message, blindly splitting on ",".
|
||||
chat = chat.lstrip("@")
|
||||
all_cmds_handled = True
|
||||
for command_str in chat.split(","):
|
||||
if not command_str:
|
||||
continue
|
||||
# RLV-style command, `@<cmd>(:<option1>;<option2>)?(=<param>)?`
|
||||
options, _, param = command_str.partition("=")
|
||||
cmd, _, options = options.partition(":")
|
||||
# TODO: Not always correct, commands can specify their own parsing for the option field
|
||||
options = options.split(";") if options else []
|
||||
source = message["ChatData"]["SourceID"]
|
||||
try:
|
||||
with addon_ctx.push(session, region):
|
||||
handled = cls._call_all_addon_hooks("handle_rlv_command",
|
||||
session, region, source, cmd, options, param)
|
||||
if handled:
|
||||
region.circuit.drop_message(message)
|
||||
else:
|
||||
all_cmds_handled = False
|
||||
except:
|
||||
LOG.exception(f"Failed while handling command {command_str!r}")
|
||||
all_cmds_handled = False
|
||||
if not cls._SWALLOW_ADDON_EXCEPTIONS:
|
||||
raise
|
||||
# Drop the chat message if all commands it contained were handled by an addon
|
||||
if all_cmds_handled:
|
||||
return True
|
||||
|
||||
with addon_ctx.push(session, region):
|
||||
return cls._call_all_addon_hooks("handle_lludp_message", session, region, message)
|
||||
|
||||
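For clarity, a standalone sketch of how the new loop above tokenizes a multi-command OwnerSay such as `@detach=n,getstatus=2222`; the parsing lines use the same partition logic as the code.

chat = "@detach=n,getstatus=2222".lstrip("@")
for command_str in chat.split(","):
    if not command_str:
        continue
    options, _, param = command_str.partition("=")
    cmd, _, options = options.partition(":")
    options = options.split(";") if options else []
    print(cmd, options, param)
# -> detach [] n
# -> getstatus [] 2222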
@@ -2,7 +2,7 @@ from hippolyzer.lib.base.datatypes import UUID
|
||||
from hippolyzer.lib.base.message.message import Message, Block
|
||||
from hippolyzer.lib.base.network.transport import Direction
|
||||
from hippolyzer.lib.client.asset_uploader import AssetUploader
|
||||
from hippolyzer.lib.proxy.addon_utils import ais_item_to_inventory_data
|
||||
from hippolyzer.lib.client.inventory_manager import ais_item_to_inventory_data
|
||||
|
||||
|
||||
class ProxyAssetUploader(AssetUploader):
|
||||
|
||||
28 hippolyzer/lib/proxy/inventory_manager.py (new file)
@@ -0,0 +1,28 @@
|
||||
import datetime as dt
|
||||
|
||||
from hippolyzer.lib.base.helpers import get_mtime
|
||||
from hippolyzer.lib.client.inventory_manager import InventoryManager
|
||||
from hippolyzer.lib.client.state import BaseClientSession
|
||||
from hippolyzer.lib.proxy.viewer_settings import iter_viewer_cache_dirs
|
||||
|
||||
|
||||
class ProxyInventoryManager(InventoryManager):
|
||||
def __init__(self, session: BaseClientSession):
|
||||
super().__init__(session)
|
||||
newest_cache = None
|
||||
newest_timestamp = dt.datetime(year=1970, month=1, day=1, tzinfo=dt.timezone.utc)
|
||||
# Look for the newest version of the cached inventory and use that.
|
||||
# Not foolproof, but close enough if we're not sure what viewer is being used.
|
||||
for cache_dir in iter_viewer_cache_dirs():
|
||||
inv_cache_path = cache_dir / (str(session.agent_id) + ".inv.llsd.gz")
|
||||
if inv_cache_path.exists():
|
||||
mod = get_mtime(inv_cache_path)
|
||||
if not mod:
|
||||
continue
|
||||
mod_ts = dt.datetime.fromtimestamp(mod, dt.timezone.utc)
|
||||
if mod_ts <= newest_timestamp:
|
||||
continue
|
||||
newest_cache = inv_cache_path
# Remember the newest mtime seen so that an older cache found later can't win
newest_timestamp = mod_ts
|
||||
if newest_cache:
|
||||
self.load_cache(newest_cache)
|
||||
@@ -401,7 +401,7 @@ class AbstractMessageLogEntry(abc.ABC):
|
||||
beautified = minidom.parseString(content).toprettyxml(indent=" ")
|
||||
# kill blank lines. will break cdata sections. meh.
|
||||
beautified = re.sub(r'\n\s*\n', '\n', beautified, flags=re.MULTILINE)
|
||||
return re.sub(r'<([\w]+)>\s*</\1>', r'<\1></\1>',
|
||||
return re.sub(r'<(\w+)>\s*</\1>', r'<\1></\1>',
|
||||
beautified, flags=re.MULTILINE)
|
||||
|
||||
|
||||
@@ -522,7 +522,7 @@ class HTTPMessageLogEntry(AbstractMessageLogEntry):
|
||||
buf.write(bytes(headers).decode("utf8", errors="replace"))
|
||||
buf.write("\r\n")
|
||||
|
||||
buf.write(message_body)
|
||||
buf.write(message_body or "")
|
||||
return buf.getvalue()
|
||||
|
||||
def request(self, beautify=False, replacements=None):
|
||||
@@ -549,6 +549,12 @@ class HTTPMessageLogEntry(AbstractMessageLogEntry):
|
||||
return self._summary
|
||||
|
||||
def _guess_content_type(self, message):
|
||||
# SL's login service lies and says that its XML-RPC response is LLSD+XML.
|
||||
# It is not, and it blows up the parser. It's been broken ever since the
|
||||
# login rewrite and a fix is likely not forthcoming. I'm sick of seeing
|
||||
# the traceback, so just hack around it.
|
||||
if self.name == "LoginRequest":
|
||||
return "application/xml"
|
||||
content_type = message.headers.get("Content-Type", "")
|
||||
if not message.content or content_type.startswith("application/llsd"):
|
||||
return content_type
|
||||
|
||||
@@ -10,6 +10,7 @@ from typing import *
|
||||
from weakref import ref
|
||||
|
||||
from hippolyzer.lib.base.datatypes import UUID
|
||||
from hippolyzer.lib.base.helpers import proxify
|
||||
from hippolyzer.lib.base.message.message import Message
|
||||
from hippolyzer.lib.base.message.message_handler import MessageHandler
|
||||
from hippolyzer.lib.client.state import BaseClientSession
|
||||
@@ -18,6 +19,7 @@ from hippolyzer.lib.proxy.circuit import ProxiedCircuit
|
||||
from hippolyzer.lib.proxy.http_asset_repo import HTTPAssetRepo
|
||||
from hippolyzer.lib.proxy.http_proxy import HTTPFlowContext
|
||||
from hippolyzer.lib.proxy.caps import is_asset_server_cap_name, CapData, CapType
|
||||
from hippolyzer.lib.proxy.inventory_manager import ProxyInventoryManager
|
||||
from hippolyzer.lib.proxy.namecache import ProxyNameCache
|
||||
from hippolyzer.lib.proxy.object_manager import ProxyWorldObjectManager
|
||||
from hippolyzer.lib.proxy.region import ProxiedRegion
|
||||
@@ -47,6 +49,7 @@ class Session(BaseClientSession):
|
||||
self.message_handler: MessageHandler[Message, str] = MessageHandler()
|
||||
self.http_message_handler: MessageHandler[HippoHTTPFlow, str] = MessageHandler()
|
||||
self.objects = ProxyWorldObjectManager(self, session_manager.settings, session_manager.name_cache)
|
||||
self.inventory = ProxyInventoryManager(proxify(self))
|
||||
# Base path of a newview type cache directory for this session
|
||||
self.cache_dir: Optional[str] = None
|
||||
self._main_region = None
|
||||
|
||||
@@ -15,6 +15,7 @@ cryptography==36.0.2
|
||||
defusedxml==0.7.1
|
||||
Flask==2.0.2
|
||||
frozenlist==1.2.0
|
||||
gltflib==1.0.13
|
||||
Glymur==0.9.6
|
||||
h11==0.12.0
|
||||
h2==4.1.0
|
||||
|
||||
3 setup.py
@@ -25,7 +25,7 @@ from setuptools import setup, find_packages
|
||||
|
||||
here = path.abspath(path.dirname(__file__))
|
||||
|
||||
version = '0.11.3'
|
||||
version = '0.12.1'
|
||||
|
||||
with open(path.join(here, 'README.md')) as readme_fh:
|
||||
readme = readme_fh.read()
|
||||
@@ -102,6 +102,7 @@ setup(
|
||||
# Needed for mesh format conversion tooling
|
||||
'pycollada',
|
||||
'transformations',
|
||||
'gltflib',
|
||||
],
|
||||
tests_require=[
|
||||
"pytest",
|
||||
|
||||
@@ -113,7 +113,7 @@ executables = [
|
||||
|
||||
setup(
|
||||
name="hippolyzer_gui",
|
||||
version="0.9.0",
|
||||
version="0.12.1",
|
||||
description="Hippolyzer GUI",
|
||||
options=options,
|
||||
executables=executables,
|
||||
|
||||
@@ -79,6 +79,20 @@ class TestDatatypes(unittest.TestCase):
|
||||
quat = Quaternion(X=128.0, Y=128.0, Z=22.0)
|
||||
self.assertEqual(quat, (128.0, 128.0, 22.0, 0.0))
|
||||
|
||||
def test_quaternion_euler_roundtrip(self):
|
||||
orig_vec = Vector3(0.0, -1.0, 2.0)
|
||||
quat = Quaternion.from_euler(*orig_vec)
|
||||
for orig_comp, new_comp in zip(orig_vec, quat.to_euler()):
|
||||
self.assertAlmostEqual(orig_comp, new_comp)
|
||||
|
||||
def test_quaternion_transformations(self):
|
||||
quat = Quaternion(0.4034226801113349, -0.2590347239999257, 0.7384602626041288, 0.4741598817790379)
|
||||
expected_trans = (0.4741598817790379, 0.4034226801113349, -0.2590347239999257, 0.7384602626041288)
|
||||
trans_quat = quat.to_transformations()
|
||||
self.assertSequenceEqual(expected_trans, trans_quat)
|
||||
new_quat = Quaternion.from_transformations(trans_quat)
|
||||
self.assertEqual(quat, new_quat)
|
||||
|
||||
def test_uuid_from_bytes(self):
|
||||
tmp_uuid = uuid.UUID('2b7f7a6e-32c5-dbfd-e2c7-926d1a9f0aca')
|
||||
tmp_uuid2 = uuid.UUID('1dd5efe2-faaf-1864-5ac9-bc61c5d8d7ea')
|
||||
@@ -135,6 +149,9 @@ class TestDatatypes(unittest.TestCase):
|
||||
self.assertIsInstance(val, UUID)
|
||||
self.assertEqual(orig, val)
|
||||
|
||||
def test_str_llsd_serialization(self):
|
||||
self.assertEqual(b"'foo\\nbar'", llsd.format_notation("foo\nbar"))
|
||||
|
||||
def test_jank_stringy_bytes(self):
|
||||
val = JankStringyBytes(b"foo\x00")
|
||||
self.assertTrue("o" in val)
|
||||
|
||||
@@ -122,7 +122,8 @@ class TestLegacyInv(unittest.TestCase):
|
||||
'last_owner_id': UUID('a2e76fcd-9360-4f6d-a924-000000000003'),
|
||||
'next_owner_mask': 581632,
|
||||
'owner_id': UUID('a2e76fcd-9360-4f6d-a924-000000000003'),
|
||||
'owner_mask': 2147483647
|
||||
'owner_mask': 2147483647,
|
||||
'is_owner_group': 0,
|
||||
},
|
||||
'sale_info': {
|
||||
'sale_price': 10,
|
||||
|
||||
@@ -40,6 +40,8 @@ class TestMesh(unittest.TestCase):
|
||||
writer.write(serializer, reader.read(serializer))
|
||||
second_buf = writer.copy_buffer()
|
||||
self.assertEqual(first_buf, second_buf)
|
||||
# Dates may not round-trip correctly, but length should always be the same
|
||||
self.assertEqual(len(first_buf), len(self.slm_bytes))
|
||||
|
||||
def test_serialize_raw_segments(self):
|
||||
serializer = LLMeshSerializer(include_raw_segments=True)
|
||||
|
||||
@@ -6,6 +6,8 @@ import uuid
|
||||
from io import BytesIO
|
||||
from typing import Optional
|
||||
|
||||
import numpy as np
|
||||
|
||||
from hippolyzer.lib.base.datatypes import *
|
||||
import hippolyzer.lib.base.serialization as se
|
||||
from hippolyzer.lib.base.llanim import Animation, Joint, RotKeyframe
|
||||
@@ -693,6 +695,46 @@ class NameValueSerializationTests(BaseSerializationTest):
|
||||
deser.to_dict()
|
||||
|
||||
|
||||
class NumPySerializationTests(BaseSerializationTest):
|
||||
def setUp(self) -> None:
|
||||
super().setUp()
|
||||
self.writer.endianness = "<"
|
||||
|
||||
def test_simple(self):
|
||||
quant_spec = se.Vector3U16(0.0, 1.0)
|
||||
self.writer.write(quant_spec, Vector3(0, 0.1, 0))
|
||||
self.writer.write(quant_spec, Vector3(1, 1, 1))
|
||||
|
||||
reader = self._get_reader()
|
||||
np_spec = se.NumPyArray(se.BytesGreedy(), np.dtype(np.uint16), 3)
|
||||
np_val = reader.read(np_spec)
|
||||
expected_arr = np.array([[0, 6554, 0], [0xFFFF, 0xFFFF, 0xFFFF]], dtype=np.uint16)
|
||||
np.testing.assert_array_equal(expected_arr, np_val)
|
||||
|
||||
# Make sure writing the array back works correctly
|
||||
orig_buf = self.writer.copy_buffer()
|
||||
self.writer.clear()
|
||||
self.writer.write(np_spec, expected_arr)
|
||||
self.assertEqual(orig_buf, self.writer.copy_buffer())
|
||||
|
||||
def test_quantization(self):
|
||||
quant_spec = se.Vector3U16(0.0, 1.0)
|
||||
self.writer.write(quant_spec, Vector3(0, 0.1, 0))
|
||||
self.writer.write(quant_spec, Vector3(1, 1, 1))
|
||||
|
||||
reader = self._get_reader()
|
||||
np_spec = se.QuantizedNumPyArray(se.NumPyArray(se.BytesGreedy(), np.dtype(np.uint16), 3), 0.0, 1.0)
|
||||
np_val = reader.read(np_spec)
|
||||
expected_arr = np.array([[0, 0.1, 0], [1, 1, 1]], dtype=np.float64)
|
||||
np.testing.assert_array_almost_equal(expected_arr, np_val, decimal=5)
|
||||
|
||||
# Make sure writing the array back works correctly
|
||||
orig_buf = self.writer.copy_buffer()
|
||||
self.writer.clear()
|
||||
self.writer.write(np_spec, expected_arr)
|
||||
self.assertEqual(orig_buf, self.writer.copy_buffer())
|
||||
|
||||
|
||||
class AnimSerializationTests(BaseSerializationTest):
|
||||
SIMPLE_ANIM = b'\x01\x00\x00\x00\x01\x00\x00\x00H\x11\xd1?\x00\x00\x00\x00\x00H\x11\xd1?\x00\x00\x00\x00' \
|
||||
b'\xcd\xccL>\x9a\x99\x99>\x01\x00\x00\x00\x02\x00\x00\x00mNeck\x00\x01\x00\x00\x00\x03\x00' \
|
||||