33 Commits

Author SHA1 Message Date
Salad Dais
ea475b528f v0.12.2 2022-10-14 06:17:07 +00:00
Salad Dais
2036e3c5b3 Add LEAP / outleap support 2022-10-14 06:11:51 +00:00
Salad Dais
584d9f11e8 Use llsd package instead of llbase.llsd 2022-10-14 03:47:48 +00:00
Salad Dais
df020281f1 Remove send_message() alias 2022-09-28 11:46:24 +00:00
Salad Dais
78c1b8869e Remove LEAP-related code
It lives in https://github.com/SaladDais/outleap now.
Hippolyzer-specific integration will be added back in later.
2022-09-19 04:37:31 +00:00
Salad Dais
87d5e8340b Split LEAPProtocol out of LEAPClient 2022-09-18 18:05:16 +00:00
Salad Dais
e6423d2f43 More work on LEAP API wrappers 2022-09-18 07:49:18 +00:00
Salad Dais
fac44a12b0 Update cap templates 2022-09-18 05:05:00 +00:00
Salad Dais
99ca7b1674 Allow paths for text_input() 2022-09-18 05:04:36 +00:00
Salad Dais
e066724a2f Add API wrappers for LLUI and LLWindow LEAP APIs 2022-09-18 03:28:20 +00:00
Salad Dais
dce032de31 Get both scoped and unscoped LEAP listeners working 2022-09-17 22:30:47 +00:00
Salad Dais
2f578b2bc4 More LEAP work 2022-09-17 08:50:52 +00:00
Salad Dais
0c1656e6ab Start of basic LEAP client / forwarding agent 2022-09-16 09:06:01 +00:00
Salad Dais
2b6d8a70f4 v0.12.1 2022-09-12 14:30:18 +00:00
Salad Dais
1a308e9671 Mesh serialization clarifications 2022-09-12 14:17:33 +00:00
Salad Dais
7b21e5634c Slightly faster weights (de)serialization 2022-09-10 00:04:49 +00:00
Salad Dais
e4548a285d Serialize LLMesh internals with NumPy
Easy 2x speedup! Still need to do the vertex weights, but those
have irregular alignment.
2022-09-08 23:44:53 +00:00
Salad Dais
72e926f04c Better bind shape application 2022-09-08 18:58:28 +00:00
Salad Dais
d9fa14b17c Faster vec3 normalization 2022-09-08 18:27:01 +00:00
Salad Dais
33c5abaaf4 Clarify glTF comments 2022-09-08 17:17:54 +00:00
Salad Dais
2dfd61fcc5 Only calculate inverse transpose bind shape matrix once 2022-09-08 05:48:52 +00:00
Salad Dais
eb58e747ce Fix glTF skinning implementation
Still a little funky, but things display correctly now.
2022-09-08 00:32:10 +00:00
Salad Dais
1d221a2289 glTF: Apply bone scale and rotation to inverse bind matrices instead
Blender can't do anything intelligent with them. Fixes major display
issues for collision volume scaling. Figure out how to round-trip
correctly on export later.
2022-09-02 06:47:09 +00:00
Salad Dais
2ffd0458d0 More glTF cleanup 2022-09-01 20:20:02 +00:00
Salad Dais
25f533a31b glTF fixups, parse skeleton definition from avatar_skeleton.xml 2022-09-01 16:57:36 +00:00
Salad Dais
570dbce181 Add WIP glTF conversion code
Related to #24
2022-08-29 14:10:56 +00:00
Salad Dais
ccb63e971b Reorganize collada code a bit 2022-08-29 13:49:55 +00:00
Salad Dais
8be4bce8bc Make mesh uploader handle multi-faced meshes 2022-08-22 01:15:35 +00:00
Salad Dais
e945706d2b Don't hardcode VisualParams path 2022-08-21 04:52:30 +00:00
Salad Dais
6c748a6ab2 More collada notes 2022-08-21 04:52:05 +00:00
Salad Dais
6abc7ca7d2 Fix colladatools log call 2022-08-19 16:57:31 +00:00
Salad Dais
c57e0e467c Better handle dynamically-imported hot_reload()s 2022-08-19 16:54:42 +00:00
Salad Dais
e46b4adad2 Update collada notes 2022-08-18 15:44:23 +00:00
27 changed files with 997 additions and 127 deletions

View File

@@ -0,0 +1,43 @@
"""
Example of how to control a viewer over LEAP
Must launch the viewer with `outleap-agent` LEAP script.
See https://github.com/SaladDais/outleap/ for more info on LEAP / outleap.
"""
import outleap
from outleap.scripts.inspector import LEAPInspectorGUI
from hippolyzer.lib.proxy.addon_utils import send_chat, BaseAddon, show_message
from hippolyzer.lib.proxy.commands import handle_command
from hippolyzer.lib.proxy.region import ProxiedRegion
from hippolyzer.lib.proxy.sessions import Session
# Path found using `outleap-inspector`
FPS_PATH = outleap.UIPath("/main_view/menu_stack/status_bar_container/status/time_and_media_bg/FPSText")
class LEAPExampleAddon(BaseAddon):
@handle_command()
async def show_ui_inspector(self, session: Session, _region: ProxiedRegion):
"""Spawn a GUI for inspecting the UI state"""
if not session.leap_client:
show_message("No LEAP client connected?")
return
LEAPInspectorGUI(session.leap_client).show()
@handle_command()
async def say_fps(self, session: Session, _region: ProxiedRegion):
"""Say your current FPS in chat"""
if not session.leap_client:
show_message("No LEAP client connected?")
return
window_api = outleap.LLWindowAPI(session.leap_client)
fps = (await window_api.get_info(path=FPS_PATH))['value']
send_chat(f"LEAP says I'm running at {fps} FPS!")
addons = [LEAPExampleAddon()]
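
As a hedged companion sketch (using only the outleap calls the addon above already relies on), the FPS query generalizes to any UI path and any connected client:

import outleap

async def read_ui_value(client: outleap.LEAPClient, path_str: str):
    # Paths are discovered with `outleap-inspector`, like FPS_PATH above
    window_api = outleap.LLWindowAPI(client)
    info = await window_api.get_info(path=outleap.UIPath(path_str))
    return info["value"]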

View File

@@ -7,6 +7,8 @@ in the appropriate format.
from pathlib import Path
from typing import *
from hippolyzer.lib.base.mesh import LLMeshSerializer
from hippolyzer.lib.base.serialization import BufferReader
from hippolyzer.lib.base.templates import AssetType
from hippolyzer.lib.proxy.addons import AddonManager
from hippolyzer.lib.proxy.addon_utils import show_message, BaseAddon
@@ -38,8 +40,11 @@ class UploaderAddon(BaseAddon):
try:
if asset_type == AssetType.MESH:
# Kicking off a mesh upload works a little differently internally
# Half-parse the mesh so that we can figure out how many faces it has
reader = BufferReader("!", file_body)
mesh = reader.read(LLMeshSerializer(parse_segment_contents=False))
upload_token = await region.asset_uploader.initiate_mesh_upload(
name, file_body, flags=flags
name, mesh, flags=flags
)
else:
upload_token = await region.asset_uploader.initiate_asset_upload(
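
The half-parse step above is the key change: the uploader needs a face count before building the upload payload. A hedged sketch of just that step (assuming the half-parsed `high_lod` segment is still a list with one entry per face):

from hippolyzer.lib.base.mesh import LLMeshSerializer
from hippolyzer.lib.base.serialization import BufferReader

def count_mesh_faces(file_body: bytes) -> int:
    # Parse the header and segment table, skipping full segment decoding
    reader = BufferReader("!", file_body)
    mesh = reader.read(LLMeshSerializer(parse_segment_contents=False))
    return len(mesh.segments['high_lod'])  # one high-LOD submesh per face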

View File

@@ -9,6 +9,7 @@ from typing import Optional
import mitmproxy.ctx
import mitmproxy.exceptions
import outleap
from hippolyzer.lib.base import llsd
from hippolyzer.lib.proxy.addons import AddonManager
@@ -112,6 +113,7 @@ def start_proxy(session_manager: SessionManager, extra_addons: Optional[list] =
udp_proxy_port = session_manager.settings.SOCKS_PROXY_PORT
http_proxy_port = session_manager.settings.HTTP_PROXY_PORT
leap_port = session_manager.settings.LEAP_PORT
if proxy_host is None:
proxy_host = session_manager.settings.PROXY_BIND_ADDR
@@ -143,6 +145,10 @@ def start_proxy(session_manager: SessionManager, extra_addons: Optional[list] =
coro = asyncio.start_server(server.handle_connection, proxy_host, udp_proxy_port)
async_server = loop.run_until_complete(coro)
leap_server = outleap.LEAPBridgeServer(session_manager.leap_client_connected)
coro = asyncio.start_server(leap_server.handle_connection, proxy_host, leap_port)
async_leap_server = loop.run_until_complete(coro)
event_manager = MITMProxyEventManager(session_manager, flow_context)
loop.create_task(event_manager.run())
@@ -169,6 +175,8 @@ def start_proxy(session_manager: SessionManager, extra_addons: Optional[list] =
# Close the server
print("Closing SOCKS server")
async_server.close()
print("Shutting down LEAP server")
async_leap_server.close()
print("Shutting down addons")
AddonManager.shutdown()
print("Waiting for SOCKS server to close")

View File

@@ -15,7 +15,7 @@ import logging
import os.path
import secrets
import sys
from typing import Dict, List, Optional, Union, Sequence
from typing import Dict, Optional
import collada
import collada.source
@@ -24,23 +24,20 @@ from lxml import etree
import numpy as np
import transformations
from hippolyzer.lib.base.datatypes import Vector3
from hippolyzer.lib.base.helpers import get_resource_filename
from hippolyzer.lib.base.serialization import BufferReader
from hippolyzer.lib.base.mesh import LLMeshSerializer, MeshAsset, positions_from_domain, SkinSegmentDict
from hippolyzer.lib.base.mesh import (
LLMeshSerializer,
MeshAsset,
positions_from_domain,
SkinSegmentDict,
llsd_to_mat4,
)
LOG = logging.getLogger(__name__)
DIR = os.path.dirname(os.path.realpath(__file__))
def llsd_to_mat4(mat: Union[np.ndarray, Sequence[float]]) -> np.ndarray:
return np.array(mat).reshape((4, 4), order='F')
def mat4_to_llsd(mat: np.ndarray) -> List[float]:
return list(mat.flatten(order='F'))
def mat4_to_collada(mat: np.ndarray) -> np.ndarray:
return mat.flatten(order='C')
@@ -98,7 +95,7 @@ def llmesh_to_node(ll_mesh: MeshAsset, dae: collada.Collada, uniq=None,
reflective=0.0,
shadingtype="blinn",
shininess=0.0,
diffuse=(0.0, 0.0, 0.0),
diffuse=(1.0, 1.0, 1.0),
)
mat = collada.material.Material(f"material{sub_uniq}", f"material{sub_uniq}", effect)
@@ -190,6 +187,8 @@ def llmesh_to_node(ll_mesh: MeshAsset, dae: collada.Collada, uniq=None,
if should_skin:
# We need a skeleton per _mesh asset_ because you could have incongruous skeletons
# within the same linkset.
# TODO: can we maintain some kind of skeleton cache, where if this skeleton has no conflicts
# with another skeleton in the cache, we just use that skeleton and add any additional joints?
skel_root = load_skeleton_nodes()
transform_skeleton(skel_root, dae, skin_seg)
skel = collada.scene.Node.load(dae, skel_root, {})
@@ -211,7 +210,6 @@ def load_skeleton_nodes() -> etree.ElementBase:
def transform_skeleton(skel_root: etree.ElementBase, dae: collada.Collada, skin_seg: SkinSegmentDict,
include_unreferenced_bones=False):
"""Update skeleton XML nodes to account for joint translations in the mesh"""
# TODO: Use translation component only.
joint_nodes: Dict[str, collada.scene.Node] = {}
for skel_node in skel_root.iter():
# xpath is loathsome so this is easier.
@@ -262,48 +260,61 @@ def _create_mat4_source(name: str, data: np.ndarray, semantic: str):
return source
def fix_weird_bind_matrices(skin_seg: SkinSegmentDict):
def fix_weird_bind_matrices(skin_seg: SkinSegmentDict) -> None:
"""
Fix weird-looking bind matrices to have normal scaling and rotations
Fix weird-looking bind matrices to have sensible scaling and rotations
Not sure why these even happen (weird mesh authoring programs?)
Sometimes get enormous inverse bind matrices (each component 10k+) and tiny
Sometimes we get enormous inverse bind matrices (each component 10k+) and tiny
bind shape matrix components. This detects inverse bind shape matrices
with weird scales and tries to set them to what they "should" be without
the weird inverted scaling.
"""
scale_fixup = Vector3(1, 1, 1)
angle_fixup = Vector3(0, 0, 0)
have_fixups = False
# Totally non-scientific method of detecting odd bind matrices based on squinting very,
# very hard at a random sample of assets.
for joint_name, joint_inv in zip(skin_seg['joint_names'], skin_seg['inverse_bind_matrix']):
if not joint_name.startswith("m"):
# We can't make very good guesses based on collision volume scales and rotations,
# skip anything but the "m" joints.
continue
joint_mat = llsd_to_mat4(joint_inv)
joint_scale, _, joint_angle, _, _ = transformations.decompose_matrix(joint_mat)
# If the scale component of an mJointName joint isn't roughly <1,1,1>, we likely have
# scaling applied to the inverse bind matrices rather than the bind matrix. Figure out
# what the fixup should be so that we can reverse it.
if abs(3.0 - sum(joint_scale)) > 0.5:
scale_fixup = Vector3(1, 1, 1) / Vector3(*joint_scale)
have_fixups = True
# I wouldn't expect mJointName joints to be rotated at all in their inverse bind matrices.
# Is this a rotation that should've been applied to the bind shape matrix instead?
# In any event, all joints are likely rotated by this amount, so calculate the inverse.
if abs(sum(joint_angle)) > 0.05:
angle_fixup = -Vector3(*joint_angle)
have_fixups = True
if have_fixups:
LOG.warning("Detected weird matrices in mesh!", scale_fixup, angle_fixup)
# The magnitude of the scales in the inverse bind matrices look very strange.
# The bind matrix itself is probably messed up as well, try to fix it.
# TODO: DON'T MESS WITH INVERSE TRANSLATION!!!! Only bind shape gets its translation scaled.
# TODO: put this back in, the previous logic was totally wrong-headed.
pass
# Sometimes we get mesh assets that have the vertex data naturally in y-up orientation,
# and get re-oriented to z-up not through the bind shape matrix, but through the
# transforms in the inverse bind matrices!
#
# Blender, for one, does not like this very much, and generally won't generate mesh
# assets like this, as explained here https://developer.blender.org/T38660.
# In vanilla Blender, these mesh assets will show up scaled and rotated _only_ according
# to the bind shape matrix, which may end up with the model 25 meters tall and sitting
# on its side.
#
# https://avalab.org/avastar/292/knowledge/compare-workbench/, while somewhat outdated,
# has some information on rest pose vs default pose and scaling that I believe is relevant.
# https://github.com/KhronosGroup/glTF-Blender-IO/issues/994 as well.
#
# While trying to figure out what was going on, I searched for something like
# "inverse bind matrix scale collada", "bind pose scale blender", etc. Pretty much every
# result was either a bug filed by, or a question asked by the creator of Avastar, or an SL user.
# I think that says a lot about how annoying it is to author mesh for SL in particular.
#
# I spent a good month or so tearing my hair out over this wondering how these values could
# even be possible. I wasn't sure how I should write mesh import code if I don't understand
# how to interpret existing data, or how it even ended up the way it did. Turns out I wasn't
# misinterpreting the data, the data really is just weird.
#
# I'd also had the idea that you could sniff which body a given rigged asset was meant
# for by doing trivial matching on the inverse bind matrices, but obviously that isn't true!
#
# Basically:
# 1) Maya is evil and generates evil; this evil bleeds into SL's assets through transforms.
# 2) Blender is also evil, but in a manner that doesn't agree with Maya's evil.
# 3) Collada was a valiant effort, but is evil in practice. Seemingly simple Collada
# files are interpreted completely differently by Blender, Maya, and sometimes SL.
# 4) Those three evils collude to make an interop nightmare for everyone like "oh my rigger
# rigs using Maya and now my model is huge and all my normals are fucked on reimport"
# 5) Yes, there are still good reasons to be using Avastar in 2022, even though nobody authoring
# rigged mesh for any other use has to use something similar.
if not skin_seg['joint_names']:
return
# TODO: calculate the correct inverse bind matrix scale & rotations from avatar_skeleton.xml
# definitions. If the rotation and scale factors are the same across all inverse bind matrices then
# they can be moved over to the bind shape matrix to keep Blender happy.
# Maybe add a scaled / rotated empty as a parent for the armature instead?
return
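# Editor's sketch: llsd_to_mat4() (imported from hippolyzer.lib.base.mesh
# above) and mat4_to_llsd() (also defined there now, per the mesh.py diff
# below) treat an LLSD matrix as 16 floats in column-major (Fortran) order.
# A quick round-trip check:
def _mat4_order_demo():
    from hippolyzer.lib.base.mesh import llsd_to_mat4, mat4_to_llsd
    flat = [float(i) for i in range(16)]
    mat = llsd_to_mat4(flat)
    assert mat[0, 1] == 4.0           # column 1 starts at flat[4]
    assert mat4_to_llsd(mat) == flat  # flatten(order='F') round-trips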
def main():

View File

@@ -39,6 +39,9 @@ class _IterableStub:
__iter__: Callable
RAD_TO_DEG = 180 / math.pi
class TupleCoord(recordclass.datatuple, _IterableStub): # type: ignore
__options__ = {
"fast_new": False,
@@ -372,5 +375,5 @@ class TaggedUnion(recordclass.datatuple): # type: ignore
__all__ = [
"Vector3", "Vector4", "Vector2", "Quaternion", "TupleCoord",
"UUID", "RawBytes", "StringEnum", "JankStringyBytes", "TaggedUnion",
"IntEnum", "IntFlag", "flags_to_pod", "Pretty"
"IntEnum", "IntFlag", "flags_to_pod", "Pretty", "RAD_TO_DEG"
]

View File

@@ -59,17 +59,15 @@ class Event:
continue
if one_shot:
self.unsubscribe(instance, *inner_args, **kwargs)
if instance(args, *inner_args, **kwargs):
if instance(args, *inner_args, **kwargs) and not one_shot:
self.unsubscribe(instance, *inner_args, **kwargs)
def get_subscriber_count(self):
def __len__(self):
return len(self.subscribers)
def clear_subscribers(self):
self.subscribers.clear()
return self
__iadd__ = subscribe
__isub__ = unsubscribe
__call__ = notify
__len__ = get_subscriber_count
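
To make the notify semantics concrete, here is a minimal re-implementation of what the diff implies, not the actual Event class (simplified: no one_shot flag or per-subscriber args):

class MiniEvent:
    def __init__(self):
        self.subscribers = []

    def subscribe(self, fn):
        self.subscribers.append(fn)
        return self  # returning self makes `event += fn` work

    def unsubscribe(self, fn):
        self.subscribers.remove(fn)
        return self

    def notify(self, args):
        for fn in list(self.subscribers):
            if fn(args):  # a truthy return value detaches the subscriber
                self.unsubscribe(fn)

    __iadd__ = subscribe
    __isub__ = unsubscribe
    __call__ = notify

    def __len__(self):
        return len(self.subscribers)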

View File

@@ -0,0 +1,525 @@
"""
WIP LLMesh -> glTF converter, for testing eventual glTF -> LLMesh conversion logic.
"""
# TODO:
# * Simple tests
# * Round-tripping skinning data from Blender-compatible glTF back to LLMesh (maybe through rig retargeting?)
# * Panda3D-glTF viewer for LLMesh? The glTFs seem to work fine in Panda3D-glTF's `gltf-viewer`.
# * Check if skew and projection components of transform matrices are ignored in practice as the spec requires.
# I suppose this would render some real assets impossible to represent with glTF.
import dataclasses
import math
import pprint
import sys
import uuid
from pathlib import Path
from typing import *
import gltflib
import numpy as np
import transformations
from hippolyzer.lib.base.datatypes import Vector3
from hippolyzer.lib.base.mesh import (
LLMeshSerializer, MeshAsset, positions_from_domain, SkinSegmentDict, VertexWeight, llsd_to_mat4
)
from hippolyzer.lib.base.mesh_skeleton import AVATAR_SKELETON
from hippolyzer.lib.base.serialization import BufferReader
class IdentityList(list):
"""
List, but does index() by object identity, not equality
GLTF references objects by their index within some list, but we prefer to pass around
actual object references internally. If we don't do this, then when we try and get
a GLTF reference to a given object via `.index()` then we could end up actually getting
a reference to some other object that just happens to be equal. This was causing issues
with all primitives ending up with the same material, due to the default material's value
being the same across all primitives.
"""
def index(self, value, start: Optional[int] = None, stop: Optional[int] = None) -> int:
view = self[start:stop]
for i, x in enumerate(view):
if x is value:
if start:
return i + start
return i
raise ValueError(value)
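# Editor's sketch of why identity (not equality) matters here, assuming
# gltflib's models use dataclass equality:
def _identity_list_demo():
    a, b = gltflib.Material(), gltflib.Material()
    assert a == b and a is not b               # equal but distinct materials
    assert IdentityList([a, b]).index(b) == 1  # list.index(b) would return 0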
def sl_to_gltf_coords(coords):
"""
SL (X, Y, Z) -> GL (X, Z, Y), as GLTF commandeth
Note that this will only work when reordering axes, flipping an axis is more complicated.
"""
return coords[0], coords[2], coords[1], *coords[3:]
def sl_to_gltf_uv(uv):
"""Flip the V coordinate of a UV to match glTF convention"""
return [uv[0], -uv[1]]
def sl_mat4_to_gltf(mat: np.ndarray) -> List[float]:
"""
Convert an SL Mat4 to the glTF coordinate system
This should only be done immediately before storing the matrix in a glTF structure!
"""
# TODO: This is probably not correct. We definitely need to flip Z but there's
# probably a better way to do it.
decomp = [sl_to_gltf_coords(x) for x in transformations.decompose_matrix(mat)]
trans = decomp[3]
decomp[3] = (trans[0], trans[1], -trans[2])
return list(transformations.compose_matrix(*decomp).flatten(order='F'))
# Mat3 to convert points from SL coordinate space to GLTF coordinate space
POINT_TO_GLTF_MAT = transformations.compose_matrix(angles=(-(math.pi / 2), 0, 0))[:3, :3]
def sl_vec3_array_to_gltf(vec_list: np.ndarray) -> np.ndarray:
new_array = []
for x in vec_list:
new_array.append(POINT_TO_GLTF_MAT.dot(x))
return np.array(new_array)
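# Editor's sketch: POINT_TO_GLTF_MAT is a -90 degree rotation about X, so an
# SL point (x, y, z) maps to glTF (x, z, -y) and SL's +Z up becomes glTF +Y up.
def _axis_mapping_demo():
    assert np.allclose(POINT_TO_GLTF_MAT.dot([0.0, 0.0, 1.0]), [0.0, 1.0, 0.0])
    assert np.allclose(POINT_TO_GLTF_MAT.dot([0.0, 1.0, 0.0]), [0.0, 0.0, -1.0])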
def sl_weights_to_gltf(sl_weights: List[List[VertexWeight]]) -> Tuple[np.ndarray, np.ndarray]:
"""Convert SL Weights to separate JOINTS_0 and WEIGHTS_0 vec4 arrays"""
joints = np.zeros((len(sl_weights), 4), dtype=np.uint8)
weights = np.zeros((len(sl_weights), 4), dtype=np.float32)
for i, vert_weights in enumerate(sl_weights):
# We need to re-normalize these since the quantization can mess them up
collected_weights = []
for j, vert_weight in enumerate(vert_weights):
joints[i, j] = vert_weight.joint_idx
collected_weights.append(vert_weight.weight)
weight_sum = sum(collected_weights)
if weight_sum:
for j, weight in enumerate(collected_weights):
weights[i, j] = weight / weight_sum
return joints, weights
def normalize_vec3(a):
norm = np.linalg.norm(a)
if norm == 0:
return a
return a / norm
def apply_bind_shape_matrix(bind_shape_matrix: np.ndarray, verts: np.ndarray, norms: np.ndarray) \
-> Tuple[np.ndarray, np.ndarray]:
"""
Apply the bind shape matrix to the mesh data
glTF expects all verts and normals to be in armature-local space so that mesh data can be shared
between differently-oriented armatures. Or something.
# https://github.com/KhronosGroup/glTF-Blender-IO/issues/566#issuecomment-523119339
glTF, per its skinning docs, has no equivalent of Collada's "bind shape matrix",
so we have to mix it into the mesh data manually.
See https://github.com/KhronosGroup/glTF-Tutorials/blob/master/gltfTutorial/gltfTutorial_020_Skins.md
"""
scale, _, angles, translation, _ = transformations.decompose_matrix(bind_shape_matrix)
scale_mat = transformations.compose_matrix(scale=scale)[:3, :3]
rot_mat = transformations.euler_matrix(*angles)[:3, :3]
rot_scale_mat = scale_mat @ np.linalg.inv(rot_mat)
# Apply the SRT transform to each vert
verts = (verts @ rot_scale_mat) + translation
# Our scale is unlikely to be uniform, so we have to fix up our normals as well.
# https://paroj.github.io/gltut/Illumination/Tut09%20Normal%20Transformation.html
inv_transpose_mat = np.transpose(np.linalg.inv(bind_shape_matrix)[:3, :3])
new_norms = [normalize_vec3(inv_transpose_mat @ norm) for norm in norms]
return verts, np.array(new_norms)
@dataclasses.dataclass
class JointContext:
node: gltflib.Node
# Original matrix for the bone, may have custom translation, but otherwise the same.
orig_matrix: np.ndarray
# xform that must be applied to inverse bind matrices to account for the changed bone
fixup_matrix: np.ndarray
JOINT_CONTEXT_DICT = Dict[str, JointContext]
class GLTFBuilder:
def __init__(self, blender_compatibility=False):
self.scene = gltflib.Scene(nodes=IdentityList())
self.model = gltflib.GLTFModel(
asset=gltflib.Asset(version="2.0"),
accessors=IdentityList(),
nodes=IdentityList(),
materials=IdentityList(),
buffers=IdentityList(),
bufferViews=IdentityList(),
meshes=IdentityList(),
skins=IdentityList(),
scenes=IdentityList((self.scene,)),
extensionsUsed=["KHR_materials_specular"],
scene=0,
)
self.gltf = gltflib.GLTF(
model=self.model,
resources=IdentityList(),
)
self.blender_compatibility = blender_compatibility
def add_nodes_from_llmesh(self, mesh: MeshAsset, name: str, mesh_transform: Optional[np.ndarray] = None):
"""Build a glTF version of a mesh asset, appending it and its armature to the scene root"""
# TODO: mesh data instancing?
# consider https://github.com/KhronosGroup/glTF-Blender-IO/issues/1634.
if mesh_transform is None:
mesh_transform = np.identity(4)
skin_seg: Optional[SkinSegmentDict] = mesh.segments.get('skin')
skin = None
if skin_seg:
mesh_transform = llsd_to_mat4(skin_seg['bind_shape_matrix'])
joint_ctxs = self.add_joints(skin_seg)
# Give our armature a root node and parent the pelvis to it
armature_node = self.add_node("Armature")
self.scene.nodes.append(self.model.nodes.index(armature_node))
armature_node.children.append(self.model.nodes.index(joint_ctxs['mPelvis'].node))
skin = self.add_skin("Armature", joint_ctxs, skin_seg)
skin.skeleton = self.model.nodes.index(armature_node)
primitives = []
# Just the high LOD for now
for submesh in mesh.segments['high_lod']:
verts = np.array(positions_from_domain(submesh['Position'], submesh['PositionDomain']))
norms = np.array(submesh['Normal'])
tris = np.array(submesh['TriangleList'])
joints = np.array([])
weights = np.array([])
range_uv = np.array([])
if "TexCoord0" in submesh:
range_uv = np.array(positions_from_domain(submesh['TexCoord0'], submesh['TexCoord0Domain']))
if 'Weights' in submesh:
joints, weights = sl_weights_to_gltf(submesh['Weights'])
if skin:
# Convert verts and norms to armature-local space
verts, norms = apply_bind_shape_matrix(mesh_transform, verts, norms)
primitives.append(self.add_primitive(
tris=tris,
positions=verts,
normals=norms,
uvs=range_uv,
joints=joints,
weights=weights,
))
mesh_node = self.add_node(
name,
self.add_mesh(name, primitives),
transform=mesh_transform,
)
if skin:
# Node translation isn't relevant, we're going to use the bind matrices
# If you pull this into Blender you may want to untick "Guess Original Bind Pose",
# it guesses that based on the inverse bind matrices which may have Maya poisoning.
# TODO: Maybe we could automatically undo that by comparing expected bone scale and rot
# to scale and rot in the inverse bind matrices, and applying fixups to the
# bind shape matrix and inverse bind matrices?
mesh_node.matrix = None
mesh_node.skin = self.model.skins.index(skin)
self.scene.nodes.append(self.model.nodes.index(mesh_node))
def add_node(
self,
name: str,
mesh: Optional[gltflib.Mesh] = None,
transform: Optional[np.ndarray] = None,
) -> gltflib.Node:
node = gltflib.Node(
name=name,
mesh=self.model.meshes.index(mesh) if mesh else None,
matrix=sl_mat4_to_gltf(transform) if transform is not None else None,
children=[],
)
self.model.nodes.append(node)
return node
def add_mesh(
self,
name: str,
primitives: List[gltflib.Primitive],
) -> gltflib.Mesh:
for i, prim in enumerate(primitives):
# Give the materials a name relating to what "face" they belong to
self.model.materials[prim.material].name = f"{name}.{i:03}"
mesh = gltflib.Mesh(name=name, primitives=primitives)
self.model.meshes.append(mesh)
return mesh
def add_primitive(
self,
tris: np.ndarray,
positions: np.ndarray,
normals: np.ndarray,
uvs: np.ndarray,
weights: np.ndarray,
joints: np.ndarray,
) -> gltflib.Primitive:
# Make a Material for the primitive. Materials pretty much _are_ the primitives in
# LLMesh, so just make them both in one go. We need a unique material for each primitive.
material = gltflib.Material(
pbrMetallicRoughness=gltflib.PBRMetallicRoughness(
baseColorFactor=[1.0, 1.0, 1.0, 1.0],
metallicFactor=0.0,
roughnessFactor=0.0,
),
extensions={
"KHR_materials_specular": {
"specularFactor": 0.0,
"specularColorFactor": [0, 0, 0]
},
}
)
self.model.materials.append(material)
attributes = gltflib.Attributes(
POSITION=self.maybe_add_vec_array(sl_vec3_array_to_gltf(positions), gltflib.AccessorType.VEC3),
NORMAL=self.maybe_add_vec_array(sl_vec3_array_to_gltf(normals), gltflib.AccessorType.VEC3),
TEXCOORD_0=self.maybe_add_vec_array(np.array([sl_to_gltf_uv(uv) for uv in uvs]), gltflib.AccessorType.VEC2),
JOINTS_0=self.maybe_add_vec_array(joints, gltflib.AccessorType.VEC4, gltflib.ComponentType.UNSIGNED_BYTE),
WEIGHTS_0=self.maybe_add_vec_array(weights, gltflib.AccessorType.VEC4),
)
return gltflib.Primitive(
attributes=attributes,
indices=self.model.accessors.index(self.add_scalars(tris)),
material=self.model.materials.index(material),
mode=gltflib.PrimitiveMode.TRIANGLES,
)
def add_scalars(self, scalars: np.ndarray) -> gltflib.Accessor:
"""
Add a potentially multidimensional array of scalars, returning the accessor
Generally only used for triangle indices
"""
scalar_bytes = scalars.astype(np.uint32).flatten().tobytes()
buffer_view = self.add_buffer_view(scalar_bytes, None)
accessor = gltflib.Accessor(
bufferView=self.model.bufferViews.index(buffer_view),
componentType=gltflib.ComponentType.UNSIGNED_INT,
count=scalars.size, # use the flattened size!
type=gltflib.AccessorType.SCALAR.value, # type: ignore
min=[int(scalars.min())], # type: ignore
max=[int(scalars.max())], # type: ignore
)
self.model.accessors.append(accessor)
return accessor
def maybe_add_vec_array(
self,
vecs: np.ndarray,
vec_type: gltflib.AccessorType,
component_type: gltflib.ComponentType = gltflib.ComponentType.FLOAT,
) -> Optional[int]:
if not vecs.size:
return None
accessor = self.add_vec_array(vecs, vec_type, component_type)
return self.model.accessors.index(accessor)
def add_vec_array(
self,
vecs: np.ndarray,
vec_type: gltflib.AccessorType,
component_type: gltflib.ComponentType = gltflib.ComponentType.FLOAT
) -> gltflib.Accessor:
"""
Add a two-dimensional array of vecs (positions, normals, weights, UVs) returning the accessor
Vec type may be a vec2, vec3, or a vec4.
"""
# Pretty much all of these are float32 except the ones that aren't
dtype = np.float32
if component_type == gltflib.ComponentType.UNSIGNED_BYTE:
dtype = np.uint8
vec_data = vecs.astype(dtype).tobytes()
buffer_view = self.add_buffer_view(vec_data, target=None)
accessor = gltflib.Accessor(
bufferView=self.model.bufferViews.index(buffer_view),
componentType=component_type,
count=len(vecs),
type=vec_type.value, # type: ignore
min=vecs.min(axis=0).tolist(), # type: ignore
max=vecs.max(axis=0).tolist(), # type: ignore
)
self.model.accessors.append(accessor)
return accessor
def add_buffer_view(self, data: bytes, target: Optional[gltflib.BufferTarget]) -> gltflib.BufferView:
"""Create a buffer view and associated buffer and resource for a blob of data"""
resource = gltflib.FileResource(filename=f"res-{uuid.uuid4()}.bin", data=data)
self.gltf.resources.append(resource)
buffer = gltflib.Buffer(uri=resource.filename, byteLength=len(resource.data))
self.model.buffers.append(buffer)
buffer_view = gltflib.BufferView(
buffer=self.model.buffers.index(buffer),
byteLength=buffer.byteLength,
byteOffset=0,
target=target
)
self.model.bufferViews.append(buffer_view)
return buffer_view
def add_joints(self, skin: SkinSegmentDict) -> JOINT_CONTEXT_DICT:
joints: JOINT_CONTEXT_DICT = {}
# There may be some joints not present in the mesh that we need to add to reach the mPelvis root
required_joints = AVATAR_SKELETON.get_required_joints(skin['joint_names'])
# If this is present, it may override the joint positions from the skeleton definition
if 'alt_inverse_bind_matrix' in skin:
joint_overrides = dict(zip(skin['joint_names'], skin['alt_inverse_bind_matrix']))
else:
joint_overrides = {}
for joint_name in required_joints:
joint = AVATAR_SKELETON[joint_name]
joint_matrix = joint.matrix
# Do we have a joint position override that would affect joint_matrix?
override = joint_overrides.get(joint_name)
if override:
decomp = list(transformations.decompose_matrix(joint_matrix))
# We specifically only want the translation from the override!
translation = transformations.translation_from_matrix(llsd_to_mat4(override))
# Only do it if the difference is over 0.1mm though
if Vector3.dist(Vector3(*translation), joint.translation) > 0.0001:
decomp[3] = translation
joint_matrix = transformations.compose_matrix(*decomp)
# Do we need to mess with the bone's matrices to make Blender cooperate?
orig_matrix = joint_matrix
fixup_matrix = np.identity(4)
if self.blender_compatibility:
joint_matrix, fixup_matrix = self._fix_blender_joint(joint_matrix)
# TODO: populate "extras" here with the metadata the Blender collada stuff uses to store
# "bind_mat" and "rest_mat" so we can go back to our original matrices when exporting
# from blender to .dae!
node = self.add_node(joint_name, transform=joint_matrix)
# Store the node along with any fixups we may need to apply to the bind matrices later
joints[joint_name] = JointContext(node, orig_matrix, fixup_matrix)
# Add each joint to the child list of their respective parent
for joint_name, joint_ctx in joints.items():
if parent := AVATAR_SKELETON[joint_name].parent:
joints[parent().name].node.children.append(self.model.nodes.index(joint_ctx.node))
return joints
def _fix_blender_joint(self, joint_matrix: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Split a joint matrix into a joint matrix and fixup matrix
If we don't account for weird scaling on the collision volumes, then
Blender freaks out. This is an issue in Blender, where it doesn't
apply the inverse bind matrices relative to the scale and rotation of
the bones themselves, as it should per the glTF spec. Blender's glTF loader
tries to recover from this by applying certain transforms as a pose, but
the damage has been done by that point. Nobody else really runs into
this because they have the good sense to not use some nightmare abomination
rig with scaling and rotation on the skeleton like SL does.
Blender will _only_ correctly handle the translation component of the joint;
any other transforms need to be mixed into the inverse bind matrices themselves.
There's no internal concept of bone scale or rot in Blender right now.
Should investigate an Avastar-style approach of optionally retargeting
to a Blender-compatible rig with translation-only bones, and modify
the bind matrices to accommodate. The glTF importer supports metadata through
the "extras" fields, so we can potentially abuse the "bind_mat" metadata field
that Blender already uses for the "Keep Bind Info" Collada import / export hack.
For context:
* https://github.com/KhronosGroup/glTF-Blender-IO/issues/1305
* https://developer.blender.org/T38660 (these are Collada, but still relevant)
* https://developer.blender.org/T29246
* https://developer.blender.org/T50412
* https://developer.blender.org/T53620 (FBX but still relevant)
"""
scale, shear, angles, translate, projection = transformations.decompose_matrix(joint_matrix)
joint_matrix = transformations.compose_matrix(translate=translate)
fixup_matrix = transformations.compose_matrix(scale=scale, angles=angles)
return joint_matrix, fixup_matrix
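# Editor's sketch: transformations composes T @ R @ Z @ S, so for the
# shear-free joints in avatar_skeleton.xml the split above satisfies
# joint_matrix @ fixup_matrix == original. That's why applying fixup_matrix
# to the inverse bind matrices in add_skin() preserves the net bind transform.
def _check_joint_split(self, original: np.ndarray) -> bool:
    joint_m, fixup_m = self._fix_blender_joint(original)
    return bool(np.allclose(joint_m @ fixup_m, original))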
def add_skin(self, name: str, joint_nodes: JOINT_CONTEXT_DICT, skin_seg: SkinSegmentDict) -> gltflib.Skin:
joints_arr = []
for joint_name in skin_seg['joint_names']:
joint_ctx = joint_nodes[joint_name]
joints_arr.append(self.model.nodes.index(joint_ctx.node))
inv_binds = []
for joint_name, inv_bind in zip(skin_seg['joint_names'], skin_seg['inverse_bind_matrix']):
joint_ctx = joint_nodes[joint_name]
inv_bind = joint_ctx.fixup_matrix @ llsd_to_mat4(inv_bind)
inv_binds.append(sl_mat4_to_gltf(inv_bind))
inv_binds_data = np.array(inv_binds, dtype=np.float32).tobytes()
buffer_view = self.add_buffer_view(inv_binds_data, target=None)
accessor = gltflib.Accessor(
bufferView=self.model.bufferViews.index(buffer_view),
componentType=gltflib.ComponentType.FLOAT,
count=len(inv_binds),
type=gltflib.AccessorType.MAT4.value, # type: ignore
)
self.model.accessors.append(accessor)
accessor_idx = self.model.accessors.index(accessor)
skin = gltflib.Skin(name=name, joints=joints_arr, inverseBindMatrices=accessor_idx)
self.model.skins.append(skin)
return skin
def finalize(self):
"""Clean up the mesh to pass the glTF smell test, should be done last"""
def _nullify_empty_lists(dc):
for field in dataclasses.fields(dc):
# Empty lists should be replaced with None
if getattr(dc, field.name) == []:
setattr(dc, field.name, None)
for node in self.model.nodes:
_nullify_empty_lists(node)
_nullify_empty_lists(self.model)
return self.gltf
def main():
# Take an llmesh file as an argument and spit out basename-converted.gltf
with open(sys.argv[1], "rb") as f:
reader = BufferReader("<", f.read())
filename = Path(sys.argv[1]).stem
mesh: MeshAsset = reader.read(LLMeshSerializer(parse_segment_contents=True))
builder = GLTFBuilder(blender_compatibility=True)
builder.add_nodes_from_llmesh(mesh, filename)
gltf = builder.finalize()
pprint.pprint(gltf.model)
gltf.export_glb(sys.argv[1].rsplit(".", 1)[0] + "-converted.gltf")
if __name__ == "__main__":
main()

View File

@@ -1,14 +1,19 @@
import calendar
import datetime
import struct
import typing
import uuid
import zlib
from llbase.llsd import *
from llsd import *
# So we can directly reference the original wrapper funcs where necessary
import llbase.llsd
import llsd as base_llsd
from llsd.base import is_string, is_unicode
from hippolyzer.lib.base.datatypes import *
class HippoLLSDBaseFormatter(llbase.llsd.LLSDBaseFormatter):
class HippoLLSDBaseFormatter(base_llsd.base.LLSDBaseFormatter):
UUID: callable
ARRAY: callable
@@ -24,12 +29,12 @@ class HippoLLSDBaseFormatter(llbase.llsd.LLSDBaseFormatter):
return self.ARRAY(v.data())
class HippoLLSDXMLFormatter(llbase.llsd.LLSDXMLFormatter, HippoLLSDBaseFormatter):
class HippoLLSDXMLFormatter(base_llsd.serde_xml.LLSDXMLFormatter, HippoLLSDBaseFormatter):
def __init__(self):
super().__init__()
class HippoLLSDXMLPrettyFormatter(llbase.llsd.LLSDXMLPrettyFormatter, HippoLLSDBaseFormatter):
class HippoLLSDXMLPrettyFormatter(base_llsd.serde_xml.LLSDXMLPrettyFormatter, HippoLLSDBaseFormatter):
def __init__(self):
super().__init__()
@@ -42,7 +47,7 @@ def format_xml(val: typing.Any):
return HippoLLSDXMLFormatter().format(val)
class HippoLLSDNotationFormatter(llbase.llsd.LLSDNotationFormatter, HippoLLSDBaseFormatter):
class HippoLLSDNotationFormatter(base_llsd.serde_notation.LLSDNotationFormatter, HippoLLSDBaseFormatter):
def __init__(self):
super().__init__()
@@ -84,7 +89,7 @@ def _format_binary_recurse(something) -> bytes:
return b'1'
else:
return b'0'
elif is_integer(something):
elif isinstance(something, int):
try:
return b'i' + struct.pack('!i', something)
except (OverflowError, struct.error) as exc:
@@ -129,7 +134,7 @@ def _format_binary_recurse(something) -> bytes:
(type(something), something))
class HippoLLSDBinaryParser(llbase.llsd.LLSDBinaryParser):
class HippoLLSDBinaryParser(base_llsd.serde_binary.LLSDBinaryParser):
def __init__(self):
super().__init__()
self._dispatch[ord('u')] = lambda: UUID(bytes=self._getc(16))
@@ -162,11 +167,11 @@ def parse_binary(data: bytes):
def parse_xml(data: bytes):
return llbase.llsd.parse_xml(data)
return base_llsd.parse_xml(data)
def parse_notation(data: bytes):
return llbase.llsd.parse_notation(data)
return base_llsd.parse_notation(data)
def zip_llsd(val: typing.Any):
@@ -189,6 +194,6 @@ def parse(data: bytes):
else:
return parse_notation(data)
except KeyError as e:
raise llbase.llsd.LLSDParseError('LLSD could not be parsed: %s' % (e,))
raise base_llsd.LLSDParseError('LLSD could not be parsed: %s' % (e,))
except TypeError as e:
raise llbase.llsd.LLSDParseError('Input stream not of type bytes. %s' % (e,))
raise base_llsd.LLSDParseError('Input stream not of type bytes. %s' % (e,))
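
The port is mostly mechanical: the standalone `llsd` package keeps `llbase.llsd`'s top-level API, with the format-specific classes moved into `llsd.serde_*` submodules as referenced above. A quick sanity check of the new package (hedged, but matching the parse/format calls used in this file):

import llsd

data = {"pos": [1.0, 2.0, 3.0]}
assert llsd.parse_xml(llsd.format_xml(data)) == data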

View File

@@ -11,15 +11,25 @@ from typing import *
import zlib
from copy import deepcopy
import numpy as np
import recordclass
from hippolyzer.lib.base import serialization as se
from hippolyzer.lib.base.datatypes import Vector3, Vector2, UUID, TupleCoord
from hippolyzer.lib.base.llsd import zip_llsd, unzip_llsd
from hippolyzer.lib.base.serialization import ParseContext
LOG = logging.getLogger(__name__)
def llsd_to_mat4(mat: Union[np.ndarray, Sequence[float]]) -> np.ndarray:
return np.array(mat).reshape((4, 4), order='F')
def mat4_to_llsd(mat: np.ndarray) -> List[float]:
return list(mat.flatten(order='F'))
@dataclasses.dataclass
class MeshAsset:
header: MeshHeaderDict = dataclasses.field(default_factory=dict)
@@ -255,7 +265,6 @@ def positions_to_domain(positions: Iterable[TupleCoord], domain: DomainDict):
class VertexWeights(se.SerializableBase):
"""Serializer for a list of joint weights on a single vertex"""
INFLUENCE_SER = se.QuantizedFloat(se.U16, 0.0, 1.0)
INFLUENCE_LIMIT = 4
INFLUENCE_TERM = 0xFF
@@ -266,18 +275,30 @@ class VertexWeights(se.SerializableBase):
for val in vals:
joint_idx, influence = val
writer.write(se.U8, joint_idx)
writer.write(cls.INFLUENCE_SER, influence, ctx=ctx)
writer.write(se.U16, round(influence * 0xFFff), ctx=ctx)
if len(vals) != cls.INFLUENCE_LIMIT:
writer.write(se.U8, cls.INFLUENCE_TERM)
@classmethod
def deserialize(cls, reader: se.Reader, ctx=None):
# NOTE: normally you'd want to do something like arrange this into a nicely
# aligned byte array with zero padding so that you could vectorize the decoding.
# In cases where having a vertex with no weights is semantically equivalent to
# having a vertex _with_ weights of a value of 0.0 that's fine. This isn't the case
# in LL's implementation of mesh:
#
# https://bitbucket.org/lindenlab/viewer/src/d31a83fb946c49a38376ea3b312b5380d0c8c065/indra/llmath/llvolume.cpp#lines-2560:2628
#
# Consider the difference between handling of b"\x00\x00\x00\xFF" and b"\xFF" with the above logic.
# To simplify round-tripping while preserving those semantics, we don't do a vectorized decode.
# I had a vectorized numpy version, but those requirements made everything a bit of a mess.
influence_list = []
for _ in range(cls.INFLUENCE_LIMIT):
joint_idx = reader.read(se.U8)
joint_idx = reader.read_bytes(1)[0]
if joint_idx == cls.INFLUENCE_TERM:
break
influence_list.append(VertexWeight(joint_idx, reader.read(cls.INFLUENCE_SER, ctx=ctx)))
weight = reader.read(se.U16, ctx=ctx) / 0xFFff
influence_list.append(VertexWeight(joint_idx, weight))
return influence_list
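# Editor's sketch of the wire format handled above: each influence is a U8
# joint index followed by a little-endian U16 quantized weight, and a 0xFF
# joint byte terminates lists shorter than INFLUENCE_LIMIT. Per the note
# above, b"\x00\x00\x00\xff" (joint 0, weight 0.0) and b"\xff" (no weights)
# must decode differently:
def _decode_one_vertex(buf: bytes, offset: int = 0):
    influences = []
    for _ in range(4):
        joint_idx = buf[offset]
        offset += 1
        if joint_idx == 0xFF:
            break
        weight = int.from_bytes(buf[offset:offset + 2], "little") / 0xFFFF
        offset += 2
        influences.append((joint_idx, weight))
    return influences, offset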
@@ -312,16 +333,46 @@ class SegmentSerializer:
return new_segment
class VecListAdapter(se.Adapter):
def __init__(self, child_spec: se.SERIALIZABLE_TYPE, vec_type: Type):
super().__init__(child_spec)
self.vec_type = vec_type
def encode(self, val: Any, ctx: Optional[ParseContext]) -> Any:
return val
def decode(self, val: Any, ctx: Optional[ParseContext], pod: bool = False) -> Any:
new_vals = []
for elem in val:
new_vals.append(self.vec_type(*elem))
return new_vals
LE_U16: np.dtype = np.dtype(np.uint16).newbyteorder('<') # noqa
LOD_SEGMENT_SERIALIZER = SegmentSerializer({
# 16-bit indices to the verts making up the tri. Imposes a 16-bit
# upper limit on verts in any given material in the mesh.
"TriangleList": se.Collection(None, se.Collection(3, se.U16)),
"TriangleList": se.ExprAdapter(
se.NumPyArray(se.BytesGreedy(), LE_U16, 3),
decode_func=lambda x: x.tolist(),
),
# These are used to interpolate between values in their respective domains
# Each position represents a single vert.
"Position": se.Collection(None, se.Vector3U16(0.0, 1.0)),
"TexCoord0": se.Collection(None, se.Vector2U16(0.0, 1.0)),
# Normals have a static domain between -1 and 1, so just use that.
"Normal": se.Collection(None, se.Vector3U16(-1.0, 1.0)),
"Position": VecListAdapter(
se.QuantizedNumPyArray(se.NumPyArray(se.BytesGreedy(), LE_U16, 3), 0.0, 1.0),
Vector3,
),
"TexCoord0": VecListAdapter(
se.QuantizedNumPyArray(se.NumPyArray(se.BytesGreedy(), LE_U16, 2), 0.0, 1.0),
Vector2,
),
# Normals have a static domain between -1 and 1, so we just use that rather than 0.0 - 1.0.
"Normal": VecListAdapter(
se.QuantizedNumPyArray(se.NumPyArray(se.BytesGreedy(), LE_U16, 3), -1.0, 1.0),
Vector3,
),
"Weights": se.Collection(None, VertexWeights)
})

View File

@@ -0,0 +1,90 @@
import dataclasses
import weakref
from typing import *
import transformations
from lxml import etree
from hippolyzer.lib.base.datatypes import Vector3, RAD_TO_DEG
from hippolyzer.lib.base.helpers import get_resource_filename
MAYBE_JOINT_REF = Optional[Callable[[], "JointNode"]]
@dataclasses.dataclass(unsafe_hash=True)
class JointNode:
name: str
parent: MAYBE_JOINT_REF
translation: Vector3
pivot: Vector3 # pivot point for the joint, generally the same as translation
rotation: Vector3 # Euler rotation in degrees
scale: Vector3
type: str # bone or collision_volume
@property
def matrix(self):
return transformations.compose_matrix(
scale=tuple(self.scale),
angles=tuple(self.rotation / RAD_TO_DEG),
translate=tuple(self.translation),
)
@dataclasses.dataclass
class Skeleton:
joint_dict: Dict[str, JointNode]
def __getitem__(self, item: str) -> JointNode:
return self.joint_dict[item]
@classmethod
def _parse_node_children(cls, joint_dict: Dict[str, JointNode], node: etree.ElementBase, parent: MAYBE_JOINT_REF):
name = node.get('name')
joint = JointNode(
name=name,
parent=parent,
translation=_get_vec_attr(node, "pos", Vector3()),
pivot=_get_vec_attr(node, "pivot", Vector3()),
rotation=_get_vec_attr(node, "rot", Vector3()),
scale=_get_vec_attr(node, "scale", Vector3(1, 1, 1)),
type=node.tag,
)
joint_dict[name] = joint
for child in node.iterchildren():
cls._parse_node_children(joint_dict, child, weakref.ref(joint))
@classmethod
def from_xml(cls, node: etree.ElementBase):
joint_dict = {}
cls._parse_node_children(joint_dict, node, None)
return cls(joint_dict)
def get_required_joints(self, joint_names: Collection[str]) -> Set[str]:
"""Get all joints required to have a chain from all joints up to the root joint"""
required = set(joint_names)
for joint_name in joint_names:
joint_node = self.joint_dict.get(joint_name)
while joint_node:
required.add(joint_node.name)
if not joint_node.parent:
break
joint_node = joint_node.parent()
return required
def load_avatar_skeleton() -> Skeleton:
skel_path = get_resource_filename("lib/base/data/avatar_skeleton.xml")
with open(skel_path, 'r') as f:
skel_root = etree.fromstring(f.read())
return Skeleton.from_xml(skel_root.getchildren()[0])
def _get_vec_attr(node, attr_name, default) -> Vector3:
attr_val = node.get(attr_name, None)
if not attr_val:
return default
return Vector3(*(float(x) for x in attr_val.split(" ") if x))
AVATAR_SKELETON = load_avatar_skeleton()
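
As a usage sketch: parent links are weakrefs, so walking any joint's chain up to the root looks like this (joint name picked for illustration):

from hippolyzer.lib.base.mesh_skeleton import AVATAR_SKELETON

joint = AVATAR_SKELETON["mWristLeft"]
chain = [joint.name]
while joint.parent:
    joint = joint.parent()  # parent refs are weakref callables
    chain.append(joint.name)
print(chain)  # ends at the skeleton root, mPelvis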

View File

@@ -77,9 +77,6 @@ class Circuit:
)
return self._send_prepared_message(message, transport)
# Temporary alias
send_message = send
def send_reliable(self, message: Message, transport=None) -> asyncio.Future:
"""send() wrapper that always sends reliably and allows `await`ing ACK receipt"""
if not message.synthetic:

View File

@@ -20,7 +20,7 @@ Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from logging import getLogger
from llbase import llsd
import llsd
from hippolyzer.lib.base.message.data import msg_details

View File

@@ -157,7 +157,6 @@ class UDPMessageDeserializer:
reader.seek(current_template.get_msg_freq_num_len() + msg.offset)
for tmpl_block in current_template.blocks:
LOG.debug("Parsing %s:%s" % (msg.name, tmpl_block.name))
# EOF?
if not len(reader):
# Seems like even some "Single" blocks are optional?
@@ -180,7 +179,6 @@ class UDPMessageDeserializer:
for i in range(repeat_count):
current_block = Block(tmpl_block.name)
LOG.debug("Adding block %s" % current_block.name)
msg.add_block(current_block)
for tmpl_variable in tmpl_block.variables:

View File

@@ -10,6 +10,7 @@ from io import SEEK_CUR, SEEK_SET, SEEK_END, RawIOBase, BufferedIOBase
from typing import *
import lazy_object_proxy
import numpy as np
import hippolyzer.lib.base.llsd as llsd
import hippolyzer.lib.base.datatypes as dtypes
@@ -838,7 +839,7 @@ class QuantizedFloat(QuantizedFloatBase):
super().__init__(prim_spec, zero_median=False)
self.lower = lower
self.upper = upper
# We know the range in `QuantizedFloat` when it's constructed, so we can infer
# We know the range in `QuantizedFloat` when it's constructed, so we can infer
# whether or not we should round towards zero in __init__
max_error = (upper - lower) * self.step_mag
midpoint = (upper + lower) / 2.0
@@ -1610,7 +1611,9 @@ class BitfieldDataclass(DataclassAdapter):
class ExprAdapter(Adapter):
def __init__(self, child_spec: SERIALIZABLE_TYPE, decode_func: Callable, encode_func: Callable):
_ID = lambda x: x
def __init__(self, child_spec: SERIALIZABLE_TYPE, decode_func: Callable = _ID, encode_func: Callable = _ID):
super().__init__(child_spec)
self._decode_func = decode_func
self._encode_func = encode_func
@@ -1659,6 +1662,62 @@ class BinaryLLSD(SerializableBase):
writer.write_bytes(llsd.format_binary(val, with_header=False))
class NumPyArray(Adapter):
"""
A 2-dimensional, dynamic-length array of numpy data. Greedy.
Unlike most other serializers, your endianness _must_ be specified in the dtype!
"""
__slots__ = ['dtype', 'elems']
def __init__(self, child_spec: Optional[SERIALIZABLE_TYPE], dtype: np.dtype, elems: int):
super().__init__(child_spec)
self.dtype = dtype
self.elems = elems
def _pick_dtype(self, endian: str) -> np.dtype:
return self.dtype.newbyteorder('>') if endian != "<" else self.dtype
def decode(self, val: Any, ctx: Optional[ParseContext], pod: bool = False) -> Any:
num_elems = len(val) // self.dtype.itemsize
num_ndims = num_elems // self.elems
buf_array = np.frombuffer(val, dtype=self.dtype, count=num_elems)
return buf_array.reshape((num_ndims, self.elems))
def encode(self, val, ctx: Optional[ParseContext]) -> Any:
val: np.ndarray = np.array(val, dtype=self.dtype).flatten()
return val.tobytes()
class QuantizedNumPyArray(Adapter):
"""Like QuantizedFloat. Only works correctly for unsigned types, no zero midpoint rounding!"""
def __init__(self, child_spec: NumPyArray, lower: float, upper: float):
super().__init__(child_spec)
self.dtype = child_spec.dtype
self.lower = lower
self.upper = upper
self.step_mag = 1.0 / ((2 ** (self.dtype.itemsize * 8)) - 1)
def encode(self, val: Any, ctx: Optional[ParseContext]) -> Any:
val = np.array(val, dtype=np.float64)
val = np.clip(val, self.lower, self.upper)
delta = self.upper - self.lower
if delta == 0.0:
return np.zeros(val.shape, dtype=self.dtype)
val -= self.lower
val /= delta
val /= self.step_mag
return np.rint(val).astype(self.dtype)
def decode(self, val: Any, ctx: Optional[ParseContext], pod: bool = False) -> Any:
val = val.astype(np.float64)
val *= self.step_mag
val *= self.upper - self.lower
val += self.lower
return val
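# Editor's sketch of the math above for a U16 with a [0.0, 1.0] domain,
# matching the 6554 value exercised by the unit tests:
def _quantization_demo():
    step = 1.0 / 0xFFFF
    encoded = np.rint(0.1 / step)              # -> 6554.0
    assert encoded == 6554
    assert abs(encoded * step - 0.1) < 1e-4    # decodes within quantization error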
def subfield_serializer(msg_name, block_name, var_name):
def f(orig_cls):
global SUBFIELD_SERIALIZERS

View File

@@ -35,9 +35,8 @@ class VisualParam:
class VisualParams(List[VisualParam]):
def __init__(self):
def __init__(self, lad_path):
super().__init__()
lad_path = get_resource_filename("lib/base/data/avatar_lad.xml")
with open(lad_path, "rb") as f:
doc = parse_etree(f)
for param in doc.findall(".//param"):
@@ -59,8 +58,11 @@ class VisualParams(List[VisualParam]):
def by_wearable(self, wearable: str) -> List[VisualParam]:
return [x for x in self if x.wearable == wearable]
def by_id(self, vparam_id: int) -> VisualParam:
return [x for x in self if x.id == vparam_id][0]
VISUAL_PARAMS = VisualParams()
VISUAL_PARAMS = VisualParams(get_resource_filename("lib/base/data/avatar_lad.xml"))
@dataclasses.dataclass

View File

@@ -1,4 +1,4 @@
from typing import NamedTuple, Union, Optional
from typing import NamedTuple, Union, Optional, List
import hippolyzer.lib.base.serialization as se
from hippolyzer.lib.base import llsd
@@ -18,6 +18,11 @@ class UploadToken(NamedTuple):
payload: bytes
class MeshUploadDetails(NamedTuple):
mesh_bytes: bytes
num_faces: int
class AssetUploader:
def __init__(self, region: BaseClientRegion):
self._region = region
@@ -69,20 +74,15 @@ class AssetUploader:
"""
pass
# The mesh upload flow is a little special, so it gets its own methods
async def initiate_mesh_upload(self, name: str, mesh: Union[bytes, MeshAsset],
# The mesh upload flow is a little special, so it gets its own method
async def initiate_mesh_upload(self, name: str, mesh: Union[MeshUploadDetails, MeshAsset],
flags: Optional[int] = None) -> UploadToken:
"""
Very basic LL-serialized mesh uploader
Currently only handles a single mesh with no associated textures.
"""
if isinstance(mesh, MeshAsset):
writer = se.BufferWriter("!")
writer.write(LLMeshSerializer(), mesh)
mesh = writer.copy_buffer()
mesh = MeshUploadDetails(writer.copy_buffer(), len(mesh.segments['high_lod']))
asset_resources = self._build_asset_resources(name, mesh)
asset_resources = self._build_asset_resources(name, [mesh])
payload = {
'asset_resources': asset_resources,
'asset_type': 'mesh',
@@ -102,26 +102,26 @@ class AssetUploader:
upload_body = llsd.format_xml(asset_resources)
return UploadToken(resp_payload["upload_price"], resp_payload["uploader"], upload_body)
def _build_asset_resources(self, name: str, mesh: bytes) -> dict:
def _build_asset_resources(self, name: str, meshes: List[MeshUploadDetails]) -> dict:
instances = []
for mesh in meshes:
instances.append({
'face_list': [{
'diffuse_color': [1.0, 1.0, 1.0, 1.0],
'fullbright': False
}] * mesh.num_faces,
'material': 3,
'mesh': 0,
'mesh_name': name,
'physics_shape_type': 2,
'position': [0.0, 0.0, 0.0],
'rotation': [0.7071067690849304, 0.0, 0.0, 0.7071067690849304],
'scale': [1.0, 1.0, 1.0]
})
return {
'instance_list': [
{
'face_list': [
{
'diffuse_color': [1.0, 1.0, 1.0, 1.0],
'fullbright': False
}
],
'material': 3,
'mesh': 0,
'mesh_name': name,
'physics_shape_type': 2,
'position': [0.0, 0.0, 0.0],
'rotation': [0.7071067690849304, 0.0, 0.0, 0.7071067690849304],
'scale': [1.0, 1.0, 1.0]
}
],
'mesh_list': [mesh],
'instance_list': instances,
'mesh_list': [mesh.mesh_bytes for mesh in meshes],
'metric': 'MUT_Unspecified',
'texture_list': []
}
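
Callers holding pre-serialized mesh bytes can now pass a `MeshUploadDetails` with an explicit face count rather than a parsed `MeshAsset`. A hedged usage sketch from inside an addon coroutine (`mesh_bytes` is a placeholder for your serialized LLMesh blob):

details = MeshUploadDetails(mesh_bytes=mesh_bytes, num_faces=3)
upload_token = await region.asset_uploader.initiate_mesh_upload("three-face mesh", details)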

View File

@@ -199,9 +199,9 @@ class AddonManager:
@classmethod
def _check_hotreloads(cls):
"""Mark addons that rely on changed files for reloading"""
for filename, importers in cls.HOTRELOAD_IMPORTERS.items():
mtime = get_mtime(filename)
if not mtime or mtime == cls.FILE_MTIMES.get(filename, None):
for file_path, importers in cls.HOTRELOAD_IMPORTERS.items():
mtime = get_mtime(file_path)
if not mtime or mtime == cls.FILE_MTIMES.get(file_path, None):
continue
# Mark anything that imported this as dirty too, handling circular
@@ -220,10 +220,15 @@ class AddonManager:
_dirty_importers(importers)
if file_path not in cls.BASE_ADDON_SPECS:
# Make sure we won't reload importers in a loop if this is actually something
# that was dynamically imported, where `hot_reload()` might not be called again!
cls.FILE_MTIMES[file_path] = mtime
@classmethod
def hot_reload(cls, mod: Any, require_addons_loaded=False):
# Solely to trick the type checker because ModuleType doesn't apply where it should
# and Protocols aren't well supported yet.
# and Protocols aren't well-supported yet.
imported_mod: ModuleType = mod
imported_file = imported_mod.__file__
# Mark the caller as having imported (and being dependent on) `module`

View File

@@ -42,7 +42,7 @@ class MITMProxyEventManager:
"UpdateNotecardAgentInventory", "UpdateNotecardTaskInventory",
"UpdateScriptAgent", "UpdateScriptTask",
"UpdateSettingsAgentInventory", "UpdateSettingsTaskInventory",
"UploadBakedTexture",
"UploadBakedTexture", "UploadAgentProfileImage",
}
def __init__(self, session_manager: SessionManager, flow_context: HTTPFlowContext):

View File

@@ -189,7 +189,7 @@ class EventQueueManager:
# over the EQ. That will allow us to shove our own event onto the response once it comes in,
# otherwise we have to wait until the EQ legitimately returns 200 due to a new event.
# May or may not work in OpenSim.
circuit.send_message(Message(
circuit.send(Message(
'PlacesQuery',
Block('AgentData', AgentID=session.agent_id, SessionID=session.id, QueryID=UUID()),
Block('TransactionData', TransactionID=UUID()),

View File

@@ -9,6 +9,8 @@ import weakref
from typing import *
from weakref import ref
from outleap import LEAPClient
from hippolyzer.lib.base.datatypes import UUID
from hippolyzer.lib.base.helpers import proxify
from hippolyzer.lib.base.message.message import Message
@@ -50,6 +52,7 @@ class Session(BaseClientSession):
self.http_message_handler: MessageHandler[HippoHTTPFlow, str] = MessageHandler()
self.objects = ProxyWorldObjectManager(self, session_manager.settings, session_manager.name_cache)
self.inventory = ProxyInventoryManager(proxify(self))
self.leap_client: Optional[LEAPClient] = None
# Base path of a newview type cache directory for this session
self.cache_dir: Optional[str] = None
self._main_region = None
@@ -187,6 +190,7 @@ class SessionManager:
self.message_logger: Optional[BaseMessageLogger] = None
self.addon_ctx: Dict[str, Any] = {}
self.name_cache = ProxyNameCache()
self.pending_leap_clients: List[LEAPClient] = []
def create_session(self, login_data) -> Session:
session = Session.from_login_data(login_data, self)
@@ -203,12 +207,23 @@ class SessionManager:
if session.pending and session.id == session_id:
logging.info("Claimed %r" % session)
session.pending = False
# TODO: less crap way of tying a LEAP client to a session
while self.pending_leap_clients:
leap_client = self.pending_leap_clients.pop(-1)
# Client may have gone bad since it connected
if not leap_client.connected:
continue
logging.info("Assigned LEAP client to session")
session.leap_client = leap_client
break
return session
return None
def close_session(self, session: Session):
logging.info("Closed %r" % session)
session.objects.clear()
if session.leap_client:
session.leap_client.disconnect()
self.sessions.remove(session)
def resolve_cap(self, url: str) -> Optional["CapData"]:
@@ -218,6 +233,9 @@ class SessionManager:
return cap_data
return CapData()
async def leap_client_connected(self, leap_client: LEAPClient):
self.pending_leap_clients.append(leap_client)
@dataclasses.dataclass
class SelectionModel:

View File

@@ -25,6 +25,7 @@ class EnvSettingDescriptor(SettingDescriptor):
class ProxySettings(Settings):
SOCKS_PROXY_PORT: int = EnvSettingDescriptor(9061, "HIPPO_UDP_PORT", int)
HTTP_PROXY_PORT: int = EnvSettingDescriptor(9062, "HIPPO_HTTP_PORT", int)
LEAP_PORT: int = EnvSettingDescriptor(9063, "HIPPO_LEAP_PORT", int)
PROXY_BIND_ADDR: str = EnvSettingDescriptor("127.0.0.1", "HIPPO_BIND_HOST", str)
REMOTELY_ACCESSIBLE: bool = SettingDescriptor(False)
USE_VIEWER_OBJECT_CACHE: bool = SettingDescriptor(False)

View File

@@ -108,4 +108,7 @@ CAP_TEMPLATES: List[CAPTemplate] = [
CAPTemplate(cap_name='ViewerBenefits', method='GET', body=b'', query=set(), path=''),
CAPTemplate(cap_name='SetDisplayName', method='POST', body=b'<?xml version="1.0" ?>\n<llsd>\n<map>\n <key>display_name</key>\n <array>\n <string>OLD_DISPLAY_NAME</string>\n <string>NEW_DISPLAY_NAME</string>\n </array>\n </map>\n</llsd>\n', query=set(), path=''),
CAPTemplate(cap_name='ObjectMediaNavigate', method='POST', body=b'<?xml version="1.0" ?>\n<llsd>\n<map>\n <key>current_url</key>\n <string></string>\n <key>object_id</key>\n <uuid><!HIPPOREPL[[SELECTED_FULL]]></uuid>\n <key>texture_index</key>\n <integer></integer>\n </map>\n</llsd>\n', query=set(), path=''),
CAPTemplate(cap_name='AgentProfile', method='GET', body=b'', query=set(), path='/<SOME_ID>'),
CAPTemplate(cap_name='InterestList', method='POST', body=b'<?xml version="1.0" ?>\n<llsd>\n<map>\n <key>mode</key>\n <string>360</string>\n </map>\n</llsd>', query=set(), path='/'),
CAPTemplate(cap_name='RegionObjects', method='GET', body=b'', query=set(), path=''),
]

View File

@@ -15,6 +15,7 @@ cryptography==36.0.2
defusedxml==0.7.1
Flask==2.0.2
frozenlist==1.2.0
gltflib==1.0.13
Glymur==0.9.6
h11==0.12.0
h2==4.1.0
@@ -27,13 +28,14 @@ Jinja2==3.0.3
kaitaistruct==0.9
lazy-object-proxy==1.6.0
ldap3==2.9.1
llbase==1.2.11
llsd~=1.0.0
lxml==4.6.4
MarkupSafe==2.0.1
mitmproxy==8.0.0
msgpack==1.0.3
multidict==5.2.0
numpy==1.21.4
outleap~=0.4.1
parso==0.8.3
passlib==1.7.4
prompt-toolkit==3.0.23
@@ -65,4 +67,4 @@ wcwidth==0.2.5
Werkzeug==2.0.2
wsproto==1.0.0
yarl==1.7.2
zstandard==0.15.2
zstandard==0.15.2

View File

@@ -25,7 +25,7 @@ from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
version = '0.12.0'
version = '0.12.2'
with open(path.join(here, 'README.md')) as readme_fh:
readme = readme_fh.read()
@@ -75,13 +75,14 @@ setup(
entry_points={
'console_scripts': {
'hippolyzer-gui = hippolyzer.apps.proxy_gui:gui_main',
'hippolyzer-cli = hippolyzer.apps.proxy:main'
'hippolyzer-cli = hippolyzer.apps.proxy:main',
}
},
zip_safe=False,
python_requires='>=3.8',
install_requires=[
'llbase>=1.2.5',
'llsd<1.1.0',
'outleap<1.0',
'defusedxml',
'aiohttp<4.0.0',
'recordclass<0.15',
@@ -102,6 +103,7 @@ setup(
# Needed for mesh format conversion tooling
'pycollada',
'transformations',
'gltflib',
],
tests_require=[
"pytest",

View File

@@ -113,7 +113,7 @@ executables = [
setup(
name="hippolyzer_gui",
version="0.9.0",
version="0.12.2",
description="Hippolyzer GUI",
options=options,
executables=executables,

View File

@@ -40,6 +40,8 @@ class TestMesh(unittest.TestCase):
writer.write(serializer, reader.read(serializer))
second_buf = writer.copy_buffer()
self.assertEqual(first_buf, second_buf)
# Dates may not round-trip correctly, but length should always be the same
self.assertEqual(len(first_buf), len(self.slm_bytes))
def test_serialize_raw_segments(self):
serializer = LLMeshSerializer(include_raw_segments=True)

View File

@@ -6,6 +6,8 @@ import uuid
from io import BytesIO
from typing import Optional
import numpy as np
from hippolyzer.lib.base.datatypes import *
import hippolyzer.lib.base.serialization as se
from hippolyzer.lib.base.llanim import Animation, Joint, RotKeyframe
@@ -693,6 +695,46 @@ class NameValueSerializationTests(BaseSerializationTest):
deser.to_dict()
class NumPySerializationTests(BaseSerializationTest):
def setUp(self) -> None:
super().setUp()
self.writer.endianness = "<"
def test_simple(self):
quant_spec = se.Vector3U16(0.0, 1.0)
self.writer.write(quant_spec, Vector3(0, 0.1, 0))
self.writer.write(quant_spec, Vector3(1, 1, 1))
reader = self._get_reader()
np_spec = se.NumPyArray(se.BytesGreedy(), np.dtype(np.uint16), 3)
np_val = reader.read(np_spec)
expected_arr = np.array([[0, 6554, 0], [0xFFFF, 0xFFFF, 0xFFFF]], dtype=np.uint16)
np.testing.assert_array_equal(expected_arr, np_val)
# Make sure writing the array back works correctly
orig_buf = self.writer.copy_buffer()
self.writer.clear()
self.writer.write(np_spec, expected_arr)
self.assertEqual(orig_buf, self.writer.copy_buffer())
def test_quantization(self):
quant_spec = se.Vector3U16(0.0, 1.0)
self.writer.write(quant_spec, Vector3(0, 0.1, 0))
self.writer.write(quant_spec, Vector3(1, 1, 1))
reader = self._get_reader()
np_spec = se.QuantizedNumPyArray(se.NumPyArray(se.BytesGreedy(), np.dtype(np.uint16), 3), 0.0, 1.0)
np_val = reader.read(np_spec)
expected_arr = np.array([[0, 0.1, 0], [1, 1, 1]], dtype=np.float64)
np.testing.assert_array_almost_equal(expected_arr, np_val, decimal=5)
# Make sure writing the array back works correctly
orig_buf = self.writer.copy_buffer()
self.writer.clear()
self.writer.write(np_spec, expected_arr)
self.assertEqual(orig_buf, self.writer.copy_buffer())
class AnimSerializationTests(BaseSerializationTest):
SIMPLE_ANIM = b'\x01\x00\x00\x00\x01\x00\x00\x00H\x11\xd1?\x00\x00\x00\x00\x00H\x11\xd1?\x00\x00\x00\x00' \
b'\xcd\xccL>\x9a\x99\x99>\x01\x00\x00\x00\x02\x00\x00\x00mNeck\x00\x01\x00\x00\x00\x03\x00' \