Compare commits
63 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 52dfd0be05 |  |
|  | 60f1737115 |  |
|  | 7a5d6baf02 |  |
|  | 44a332a77b |  |
|  | beb0a2d6a4 |  |
|  | 9be66df52b |  |
|  | da0117db1b |  |
|  | 4dbf01a604 |  |
|  | 36858ed3e2 |  |
|  | 370c586582 |  |
|  | fdfffd96c9 |  |
|  | 6da9f58b23 |  |
|  | 12e3912a37 |  |
|  | 8147e7e1d7 |  |
|  | 19dba6651c |  |
|  | 274f96c710 |  |
|  | 09e1d0b6fc |  |
|  | f4fb68e310 |  |
|  | 8edf7ae89b |  |
|  | b6458e9eb7 |  |
|  | 375af1e7f6 |  |
|  | 76d0a72590 |  |
|  | 3255556835 |  |
|  | d19122c039 |  |
|  | 5692f7b8b6 |  |
|  | 21cea0f009 |  |
|  | 193d762132 |  |
|  | 227fbf7a2e |  |
|  | 25a397bcc5 |  |
|  | b0dca80b87 |  |
|  | ea475b528f |  |
|  | 2036e3c5b3 |  |
|  | 584d9f11e8 |  |
|  | df020281f1 |  |
|  | 78c1b8869e |  |
|  | 87d5e8340b |  |
|  | e6423d2f43 |  |
|  | fac44a12b0 |  |
|  | 99ca7b1674 |  |
|  | e066724a2f |  |
|  | dce032de31 |  |
|  | 2f578b2bc4 |  |
|  | 0c1656e6ab |  |
|  | 2b6d8a70f4 |  |
|  | 1a308e9671 |  |
|  | 7b21e5634c |  |
|  | e4548a285d |  |
|  | 72e926f04c |  |
|  | d9fa14b17c |  |
|  | 33c5abaaf4 |  |
|  | 2dfd61fcc5 |  |
|  | eb58e747ce |  |
|  | 1d221a2289 |  |
|  | 2ffd0458d0 |  |
|  | 25f533a31b |  |
|  | 570dbce181 |  |
|  | ccb63e971b |  |
|  | 8be4bce8bc |  |
|  | e945706d2b |  |
|  | 6c748a6ab2 |  |
|  | 6abc7ca7d2 |  |
|  | c57e0e467c |  |
|  | e46b4adad2 |  |
.github/workflows/pytest.yml (vendored, 2 changes)
@@ -8,7 +8,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.8", "3.10"]
+        python-version: ["3.8", "3.11"]
 
     steps:
     - uses: actions/checkout@v2
@@ -48,8 +48,7 @@ A proxy is provided with both a CLI and Qt-based interface. The proxy applicatio
 custom SOCKS 5 UDP proxy, as well as an HTTP proxy based on [mitmproxy](https://mitmproxy.org/).
 
 Multiple clients are supported at a time, and UDP messages may be injected in either
-direction. The proxy UI was inspired by the Message Log and Message Builder as present in
-the [Alchemy](https://github.com/AlchemyViewer/Alchemy) viewer.
+direction.
 
 ### Proxy Setup
@@ -114,7 +114,7 @@ class BlueishObjectListGUIAddon(BaseAddon):
         region.objects.request_missing_objects()
 
     def handle_object_updated(self, session: Session, region: ProxiedRegion,
-                              obj: Object, updated_props: Set[str]):
+                              obj: Object, updated_props: Set[str], msg: Optional[Message]):
         if self.blueish_model is None:
             return
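This comparison makes the same change to `handle_object_updated` in several addons: the hook gains a trailing `msg: Optional[Message]` parameter. A minimal sketch of an addon implementing the updated signature (the body is illustrative and assumes `msg` is the message that triggered the update, when there is one):

    from typing import Optional, Set

    from hippolyzer.lib.base.message.message import Message
    from hippolyzer.lib.base.objects import Object
    from hippolyzer.lib.proxy.addon_utils import BaseAddon
    from hippolyzer.lib.proxy.region import ProxiedRegion
    from hippolyzer.lib.proxy.sessions import Session


    class ObjectWatcherAddon(BaseAddon):
        def handle_object_updated(self, session: Session, region: ProxiedRegion,
                                  obj: Object, updated_props: Set[str], msg: Optional[Message]):
            # The new trailing parameter lets a handler attribute an update
            # to the message that carried it, when one exists.
            if msg is not None:
                print(f"Object {obj.LocalID} updated by {msg.name}: {updated_props}")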
@@ -6,7 +6,7 @@ from hippolyzer.lib.proxy.sessions import Session
 def handle_lludp_message(session: Session, region: ProxiedRegion, message: Message):
     # addon_ctx will persist across addon reloads, use for storing data that
     # needs to survive across calls to this function
-    ctx = session.addon_ctx
+    ctx = session.addon_ctx[__name__]
     if message.name == "ChatFromViewer":
         chat = message["ChatData"]["Message"]
         if chat == "COUNT":
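A minimal sketch of the namespaced context access introduced above, assuming the per-module entry behaves like a dict (the `ctx.setdefault()` call in the spongecase hunk further down suggests it does; the key name here is illustrative):

    def handle_lludp_message(session: Session, region: ProxiedRegion, message: Message):
        # addon_ctx is now keyed by module name, so each addon gets its own
        # bucket of persistent state instead of sharing one flat namespace.
        ctx = session.addon_ctx[__name__]
        ctx.setdefault("seen_messages", 0)
        ctx["seen_messages"] += 1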
addon_examples/leap_example.py (new file, 50 lines)
@@ -0,0 +1,50 @@
"""
Example of how to control a viewer over LEAP

Must launch the viewer with `outleap-agent` LEAP script.
See https://github.com/SaladDais/outleap/ for more info on LEAP / outleap.
"""

import outleap
from outleap.scripts.inspector import LEAPInspectorGUI

from hippolyzer.lib.proxy.addon_utils import send_chat, BaseAddon, show_message
from hippolyzer.lib.proxy.commands import handle_command
from hippolyzer.lib.proxy.region import ProxiedRegion
from hippolyzer.lib.proxy.sessions import Session, SessionManager


# Path found using `outleap-inspector`
FPS_PATH = outleap.UIPath("/main_view/menu_stack/status_bar_container/status/time_and_media_bg/FPSText")


class LEAPExampleAddon(BaseAddon):
    async def handle_leap_client_added(self, session_manager: SessionManager, leap_client: outleap.LEAPClient):
        # You can do things as soon as the LEAP client connects, like if you want to automate
        # login or whatever.
        viewer_control_api = outleap.LLViewerControlAPI(leap_client)
        # Ask for a config value and print it in the viewer logs
        print(await viewer_control_api.get("Global", "StatsPilotFile"))

    @handle_command()
    async def show_ui_inspector(self, session: Session, _region: ProxiedRegion):
        """Spawn a GUI for inspecting the UI state"""
        if not session.leap_client:
            show_message("No LEAP client connected?")
            return
        LEAPInspectorGUI(session.leap_client).show()

    @handle_command()
    async def say_fps(self, session: Session, _region: ProxiedRegion):
        """Say your current FPS in chat"""
        if not session.leap_client:
            show_message("No LEAP client connected?")
            return

        window_api = outleap.LLWindowAPI(session.leap_client)
        fps = (await window_api.get_info(path=FPS_PATH))['value']

        send_chat(f"LEAP says I'm running at {fps} FPS!")


addons = [LEAPExampleAddon()]
@@ -230,7 +230,7 @@ class MeshUploadInterceptingAddon(BaseAddon):
         show_message("Mangled upload request")
 
     def handle_object_updated(self, session: Session, region: ProxiedRegion,
-                              obj: Object, updated_props: Set[str]):
+                              obj: Object, updated_props: Set[str], msg: Optional[Message]):
         if obj.LocalID not in self.local_mesh_target_locals:
             return
         if "Name" not in updated_props or obj.Name is None:
@@ -10,6 +10,7 @@ before you start tracking can help too.
 from typing import *
 
 from hippolyzer.lib.base.datatypes import UUID
+from hippolyzer.lib.base.message.message import Message
 from hippolyzer.lib.base.objects import Object
 from hippolyzer.lib.base.templates import PCode
 from hippolyzer.lib.proxy.addon_utils import BaseAddon, show_message, SessionProperty
@@ -20,7 +21,7 @@ from hippolyzer.lib.proxy.sessions import Session
 
 class ObjectUpdateBlameAddon(BaseAddon):
     update_blame_counter: Counter[UUID] = SessionProperty(Counter)
-    track_update_blame: bool = SessionProperty(False)
+    should_track_update_blame: bool = SessionProperty(False)
 
     @handle_command()
     async def precache_objects(self, _session: Session, region: ProxiedRegion):
@@ -38,11 +39,11 @@ class ObjectUpdateBlameAddon(BaseAddon):
 
     @handle_command()
     async def track_update_blame(self, _session: Session, _region: ProxiedRegion):
-        self.track_update_blame = True
+        self.should_track_update_blame = True
 
     @handle_command()
     async def untrack_update_blame(self, _session: Session, _region: ProxiedRegion):
-        self.track_update_blame = False
+        self.should_track_update_blame = False
 
     @handle_command()
     async def clear_update_blame(self, _session: Session, _region: ProxiedRegion):
@@ -57,8 +58,8 @@ class ObjectUpdateBlameAddon(BaseAddon):
             print(f"{obj_id} ({name!r}): {count}")
 
     def handle_object_updated(self, session: Session, region: ProxiedRegion,
-                              obj: Object, updated_props: Set[str]):
-        if not self.track_update_blame:
+                              obj: Object, updated_props: Set[str], msg: Optional[Message]):
+        if not self.should_track_update_blame:
             return
         if region != session.main_region:
             return
addon_examples/puppetry_example.py (new file, 111 lines)
@@ -0,0 +1,111 @@
"""
Control a puppetry-enabled viewer and make your neck spin like crazy

It currently requires a custom rebased Firestorm with puppetry applied on top,
and patches applied on top to make startup LEAP scripts be treated as puppetry modules.
Basically, you probably don't want to use this yet. But hey, Puppetry is still only
on the beta grid anyway.
"""
import asyncio
import enum
import logging
import math
from typing import *

import outleap

from hippolyzer.lib.base.datatypes import Quaternion
from hippolyzer.lib.proxy.addon_utils import BaseAddon, SessionProperty
from hippolyzer.lib.proxy.sessions import Session

LOG = logging.getLogger(__name__)


class BodyPartMask(enum.IntFlag):
    """Which joints to send the viewer as part of "move" puppetry command"""
    HEAD = 1 << 0
    FACE = 1 << 1
    LHAND = 1 << 2
    RHAND = 1 << 3
    FINGERS = 1 << 4


def register_puppetry_command(func: Callable[[dict], Awaitable[None]]):
    """Register a method as handling inbound puppetry commands from the viewer"""
    func._puppetry_command = True
    return func


class PuppetryExampleAddon(BaseAddon):
    server_skeleton: Dict[str, Dict[str, Any]] = SessionProperty(dict)
    camera_num: int = SessionProperty(0)
    parts_active: BodyPartMask = SessionProperty(lambda: BodyPartMask(0x1F))
    puppetry_api: Optional[outleap.LLPuppetryAPI] = SessionProperty(None)
    leap_client: Optional[outleap.LEAPClient] = SessionProperty(None)

    def handle_session_init(self, session: Session):
        if not session.leap_client:
            return
        self.puppetry_api = outleap.LLPuppetryAPI(session.leap_client)
        self.leap_client = session.leap_client
        self._schedule_task(self._serve())
        self._schedule_task(self._exorcist(session))

    @register_puppetry_command
    async def enable_parts(self, args: dict):
        if (new_mask := args.get("parts_mask")) is not None:
            self.parts_active = BodyPartMask(new_mask)

    @register_puppetry_command
    async def set_camera(self, args: dict):
        if (camera_num := args.get("camera_num")) is not None:
            self.camera_num = camera_num

    @register_puppetry_command
    async def stop(self, _args: dict):
        LOG.info("Viewer asked us to stop puppetry")

    @register_puppetry_command
    async def log(self, _args: dict):
        # Intentionally ignored, we don't care about things the viewer
        # asked us to log
        pass

    @register_puppetry_command
    async def set_skeleton(self, args: dict):
        # Don't really care about what the viewer thinks the view of the skeleton is.
        # Just store it.
        self.server_skeleton = args

    async def _serve(self):
        """Handle inbound puppetry commands from viewer in a loop"""
        async with self.leap_client.listen_scoped("puppetry.controller") as listener:
            while True:
                msg = await listener.get()
                cmd = msg["command"]
                handler = getattr(self, cmd, None)
                if handler is None or not hasattr(handler, "_puppetry_command"):
                    LOG.warning(f"Unknown puppetry command {cmd!r}: {msg!r}")
                    continue
                await handler(msg.get("args", {}))

    async def _exorcist(self, session):
        """Do the Linda Blair thing with your neck"""
        spin_rad = 0.0
        while True:
            await asyncio.sleep(0.05)
            if not session.main_region:
                continue
            # Wrap spin_rad around if necessary
            while spin_rad > math.pi:
                spin_rad -= math.pi * 2

            # LEAP wants rot as a quaternion with just the imaginary parts.
            neck_rot = Quaternion.from_euler(0, 0, spin_rad).data(3)
            self.puppetry_api.move({
                "mNeck": {"no_constraint": True, "local_rot": neck_rot},
            })
            spin_rad += math.pi / 25


addons = [PuppetryExampleAddon()]
@@ -13,7 +13,7 @@ def _to_spongecase(val):
 
 
 def handle_lludp_message(session: Session, _region: ProxiedRegion, message: Message):
-    ctx = session.addon_ctx
+    ctx = session.addon_ctx[__name__]
     ctx.setdefault("spongecase", False)
     if message.name == "ChatFromViewer":
         chat = message["ChatData"]["Message"]
@@ -7,6 +7,8 @@ in the appropriate format.
 from pathlib import Path
 from typing import *
 
+from hippolyzer.lib.base.mesh import LLMeshSerializer
+from hippolyzer.lib.base.serialization import BufferReader
 from hippolyzer.lib.base.templates import AssetType
 from hippolyzer.lib.proxy.addons import AddonManager
 from hippolyzer.lib.proxy.addon_utils import show_message, BaseAddon
@@ -38,8 +40,11 @@ class UploaderAddon(BaseAddon):
         try:
             if asset_type == AssetType.MESH:
                 # Kicking off a mesh upload works a little differently internally
+                # Half-parse the mesh so that we can figure out how many faces it has
+                reader = BufferReader("!", file_body)
+                mesh = reader.read(LLMeshSerializer(parse_segment_contents=False))
                 upload_token = await region.asset_uploader.initiate_mesh_upload(
-                    name, file_body, flags=flags
+                    name, mesh, flags=flags
                 )
             else:
                 upload_token = await region.asset_uploader.initiate_asset_upload(
@@ -191,7 +191,7 @@
        </size>
       </property>
       <property name="styleSheet">
-       <string notr="true">color: rgb(80, 0, 0)</string>
+       <string notr="true"/>
      </property>
      <property name="tabChangesFocus">
       <bool>true</bool>
@@ -9,6 +9,7 @@ from typing import Optional
 
 import mitmproxy.ctx
 import mitmproxy.exceptions
+import outleap
 
 from hippolyzer.lib.base import llsd
 from hippolyzer.lib.proxy.addons import AddonManager
@@ -112,6 +113,7 @@ def start_proxy(session_manager: SessionManager, extra_addons: Optional[list] =
 
     udp_proxy_port = session_manager.settings.SOCKS_PROXY_PORT
     http_proxy_port = session_manager.settings.HTTP_PROXY_PORT
+    leap_port = session_manager.settings.LEAP_PORT
     if proxy_host is None:
         proxy_host = session_manager.settings.PROXY_BIND_ADDR
 
@@ -143,6 +145,10 @@ def start_proxy(session_manager: SessionManager, extra_addons: Optional[list] =
     coro = asyncio.start_server(server.handle_connection, proxy_host, udp_proxy_port)
     async_server = loop.run_until_complete(coro)
 
+    leap_server = outleap.LEAPBridgeServer(session_manager.leap_client_connected)
+    coro = asyncio.start_server(leap_server.handle_connection, proxy_host, leap_port)
+    async_leap_server = loop.run_until_complete(coro)
+
     event_manager = MITMProxyEventManager(session_manager, flow_context)
     loop.create_task(event_manager.run())
 
@@ -169,6 +175,8 @@ def start_proxy(session_manager: SessionManager, extra_addons: Optional[list] =
     # Close the server
     print("Closing SOCKS server")
     async_server.close()
+    print("Shutting down LEAP server")
+    async_leap_server.close()
     print("Shutting down addons")
     AddonManager.shutdown()
     print("Waiting for SOCKS server to close")
@@ -231,7 +231,8 @@ class MessageLogWindow(QtWidgets.QMainWindow):
         "AvatarRenderInfo FirestormBridge ObjectAnimation ParcelDwellRequest ParcelAccessListRequest " \
         "ParcelDwellReply ParcelAccessListReply AttachedSoundGainChange " \
         "ParcelPropertiesRequest ParcelProperties GetObjectCost GetObjectPhysicsData ObjectImage " \
-        "ViewerAsset GetTexture SetAlwaysRun GetDisplayNames MapImageService MapItemReply".split(" ")
+        "ViewerAsset GetTexture SetAlwaysRun GetDisplayNames MapImageService MapItemReply " \
+        "AgentFOV".split(" ")
     DEFAULT_FILTER = f"!({' || '.join(ignored for ignored in DEFAULT_IGNORE)})"
 
     textRequest: QtWidgets.QTextEdit
@@ -193,7 +193,7 @@
        </size>
       </property>
       <property name="styleSheet">
-       <string notr="true">color: rgb(80, 0, 0)</string>
+       <string notr="true"/>
      </property>
      <property name="tabChangesFocus">
       <bool>true</bool>
@@ -213,7 +213,7 @@
     </widget>
     <widget class="QPlainTextEdit" name="textResponse">
      <property name="styleSheet">
-      <string notr="true">color: rgb(0, 0, 80)</string>
+      <string notr="true"/>
      </property>
      <property name="tabChangesFocus">
       <bool>true</bool>
@@ -15,7 +15,7 @@ import logging
 import os.path
 import secrets
 import sys
-from typing import Dict, List, Optional, Union, Sequence
+from typing import Dict, Optional
 
 import collada
 import collada.source
@@ -24,23 +24,20 @@ from lxml import etree
 import numpy as np
 import transformations
 
 from hippolyzer.lib.base.datatypes import Vector3
 from hippolyzer.lib.base.helpers import get_resource_filename
 from hippolyzer.lib.base.serialization import BufferReader
-from hippolyzer.lib.base.mesh import LLMeshSerializer, MeshAsset, positions_from_domain, SkinSegmentDict
+from hippolyzer.lib.base.mesh import (
+    LLMeshSerializer,
+    MeshAsset,
+    positions_from_domain,
+    SkinSegmentDict,
+    llsd_to_mat4,
+)
 
 LOG = logging.getLogger(__name__)
 DIR = os.path.dirname(os.path.realpath(__file__))
 
 
-def llsd_to_mat4(mat: Union[np.ndarray, Sequence[float]]) -> np.ndarray:
-    return np.array(mat).reshape((4, 4), order='F')
-
-
-def mat4_to_llsd(mat: np.ndarray) -> List[float]:
-    return list(mat.flatten(order='F'))
-
-
 def mat4_to_collada(mat: np.ndarray) -> np.ndarray:
     return mat.flatten(order='C')
 
@@ -98,7 +95,7 @@ def llmesh_to_node(ll_mesh: MeshAsset, dae: collada.Collada, uniq=None,
         reflective=0.0,
         shadingtype="blinn",
         shininess=0.0,
-        diffuse=(0.0, 0.0, 0.0),
+        diffuse=(1.0, 1.0, 1.0),
     )
     mat = collada.material.Material(f"material{sub_uniq}", f"material{sub_uniq}", effect)
 
@@ -190,6 +187,8 @@ def llmesh_to_node(ll_mesh: MeshAsset, dae: collada.Collada, uniq=None,
     if should_skin:
         # We need a skeleton per _mesh asset_ because you could have incongruous skeletons
         # within the same linkset.
+        # TODO: can we maintain some kind of skeleton cache, where if this skeleton has no conflicts
+        #  with another skeleton in the cache, we just use that skeleton and add any additional joints?
         skel_root = load_skeleton_nodes()
         transform_skeleton(skel_root, dae, skin_seg)
         skel = collada.scene.Node.load(dae, skel_root, {})
@@ -211,7 +210,6 @@ def load_skeleton_nodes() -> etree.ElementBase:
 def transform_skeleton(skel_root: etree.ElementBase, dae: collada.Collada, skin_seg: SkinSegmentDict,
                        include_unreferenced_bones=False):
     """Update skeleton XML nodes to account for joint translations in the mesh"""
-    # TODO: Use translation component only.
     joint_nodes: Dict[str, collada.scene.Node] = {}
     for skel_node in skel_root.iter():
         # xpath is loathsome so this is easier.
@@ -262,48 +260,61 @@ def _create_mat4_source(name: str, data: np.ndarray, semantic: str):
     return source
 
 
-def fix_weird_bind_matrices(skin_seg: SkinSegmentDict):
+def fix_weird_bind_matrices(skin_seg: SkinSegmentDict) -> None:
     """
-    Fix weird-looking bind matrices to have normal scaling and rotations
+    Fix weird-looking bind matrices to have sensible scaling and rotations
 
     Not sure why these even happen (weird mesh authoring programs?)
-    Sometimes get enormous inverse bind matrices (each component 10k+) and tiny
+    Sometimes we get enormous inverse bind matrices (each component 10k+) and tiny
     bind shape matrix components. This detects inverse bind shape matrices
     with weird scales and tries to set them to what they "should" be without
    the weird inverted scaling.
     """
-    scale_fixup = Vector3(1, 1, 1)
-    angle_fixup = Vector3(0, 0, 0)
-    have_fixups = False
-    # Totally non-scientific method of detecting odd bind matrices based on squinting very,
-    # very hard at a random sample of assets.
-    for joint_name, joint_inv in zip(skin_seg['joint_names'], skin_seg['inverse_bind_matrix']):
-        if not joint_name.startswith("m"):
-            # We can't make very good guesses based on collision volume scales and rotations,
-            # skip anything but the "m" joints.
-            continue
-        joint_mat = llsd_to_mat4(joint_inv)
-        joint_scale, _, joint_angle, _, _ = transformations.decompose_matrix(joint_mat)
-        # If the scale component of an mJointName joint isn't roughly <1,1,1>, we likely have
-        # scaling applied to the inverse bind matrices rather than the bind matrix. Figure out
-        # what the fixup should be so that we can reverse it.
-        if abs(3.0 - sum(joint_scale)) > 0.5:
-            scale_fixup = Vector3(1, 1, 1) / Vector3(*joint_scale)
-            have_fixups = True
-        # I wouldn't expect mJointName joints to be rotated at all in their inverse bind matrices.
-        # Is this a rotation that should've been applied to the bind shape matrix instead?
-        # In any event, all joints are likely rotated by this amount, so calculate the inverse.
-        if abs(sum(joint_angle)) > 0.05:
-            angle_fixup = -Vector3(*joint_angle)
-            have_fixups = True
-
-    if have_fixups:
-        LOG.warning("Detected weird matrices in mesh!", scale_fixup, angle_fixup)
-        # The magnitude of the scales in the inverse bind matrices look very strange.
-        # The bind matrix itself is probably messed up as well, try to fix it.
-        # TODO: DON'T MESS WITH INVERSE TRANSLATION!!!! Only bind shape gets its translation scaled.
-        # TODO: put this back in, the previous logic was totally wrong-headed..
-        pass
+    # Sometimes we get mesh assets that have the vertex data naturally in y-up orientation,
+    # and get re-oriented to z-up not through the bind shape matrix, but through the
+    # transforms in the inverse bind matrices!
+    #
+    # Blender, for one, does not like this very much, and generally won't generate mesh
+    # assets like this, as explained here https://developer.blender.org/T38660.
+    # In vanilla Blender, these mesh assets will show up scaled and rotated _only_ according
+    # to the bind shape matrix, which may end up with the model 25 meters tall and sitting
+    # on its side.
+    #
+    # https://avalab.org/avastar/292/knowledge/compare-workbench/, while somewhat outdated,
+    # has some information on rest pose vs default pose and scaling that I believe is relevant.
+    # https://github.com/KhronosGroup/glTF-Blender-IO/issues/994 as well.
+    #
+    # While trying to figure out what was going on, I searched for something like
+    # "inverse bind matrix scale collada", "bind pose scale blender", etc. Pretty much every
+    # result was either a bug filed by, or a question asked by the creator of Avastar, or an SL user.
+    # I think that says a lot about how annoying it is to author mesh for SL in particular.
+    #
+    # I spent a good month or so tearing my hair out over this wondering how these values could
+    # even be possible. I wasn't sure how I should write mesh import code if I don't understand
+    # how to interpret existing data, or how it even ended up the way it did. Turns out I wasn't
+    # misinterpreting the data, the data really is just weird.
+    #
+    # I'd also had the idea that you could sniff which body a given rigged asset was meant
+    # for by doing trivial matching on the inverse bind matrices, but obviously that isn't true!
+    #
+    # Basically:
+    # 1) Maya is evil and generates evil, this evil bleeds into SL's assets through transforms.
+    # 2) Blender is also evil, but in a manner that doesn't agree with Maya's evil.
+    # 3) Collada was a valiant effort, but is evil in practice. Seemingly simple Collada
+    #    files are interpreted completely differently by Blender, Maya, and sometimes SL.
+    # 4) Those three evils collude to make an interop nightmare for everyone like "oh my rigger
+    #    rigs using Maya and now my model is huge and all my normals are fucked on reimport"
+    # 5) Yes, there's still good reasons to be using Avastar in 2022 even though nobody authoring
+    #    rigged mesh for any other use has to use something similar.
+
+    if not skin_seg['joint_names']:
+        return
+
+    # TODO: calculate the correct inverse bind matrix scale & rotations from avatar_skeleton.xml
+    #  definitions. If the rotation and scale factors are the same across all inverse bind matrices then
+    #  they can be moved over to the bind shape matrix to keep Blender happy.
+    #  Maybe add a scaled / rotated empty as a parent for the armature instead?
+    return
 
 
 def main():
@@ -39,12 +39,13 @@ class _IterableStub:
     __iter__: Callable
 
 
-class TupleCoord(recordclass.datatuple, _IterableStub):  # type: ignore
-    __options__ = {
-        "fast_new": False,
-    }
+RAD_TO_DEG = 180 / math.pi
+
+
+class TupleCoord(recordclass.RecordClass, _IterableStub):
     def __init__(self, *args):
         # Only to help typing, doesn't actually do anything.
         # All the important stuff happens in `__new__()`
         pass
 
     @classmethod
@@ -364,7 +365,7 @@ def flags_to_pod(flag_cls: Type[enum.IntFlag], val: int) -> Tuple[Union[str, int
     return tuple(flag.name for flag in iter(flag_cls) if val & flag.value) + extra
 
 
-class TaggedUnion(recordclass.datatuple):  # type: ignore
+class TaggedUnion(recordclass.RecordClass):
     tag: Any
     value: Any
 
@@ -372,5 +373,5 @@ class TaggedUnion(recordclass.datatuple):  # type: ignore
 __all__ = [
     "Vector3", "Vector4", "Vector2", "Quaternion", "TupleCoord",
     "UUID", "RawBytes", "StringEnum", "JankStringyBytes", "TaggedUnion",
-    "IntEnum", "IntFlag", "flags_to_pod", "Pretty"
+    "IntEnum", "IntFlag", "flags_to_pod", "Pretty", "RAD_TO_DEG"
 ]
@@ -59,17 +59,15 @@ class Event:
                 continue
             if one_shot:
                 self.unsubscribe(instance, *inner_args, **kwargs)
-            if instance(args, *inner_args, **kwargs):
+            if instance(args, *inner_args, **kwargs) and not one_shot:
                 self.unsubscribe(instance, *inner_args, **kwargs)
 
-    def get_subscriber_count(self):
+    def __len__(self):
         return len(self.subscribers)
 
     def clear_subscribers(self):
         self.subscribers.clear()
         return self
 
     __iadd__ = subscribe
     __isub__ = unsubscribe
     __call__ = notify
-    __len__ = get_subscriber_count
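A hedged sketch of the class's operator surface after this hunk, assuming `subscribe()` returns `self` so the augmented assignments work (the handler and payload here are illustrative, and the `one_shot` path is omitted since its storage isn't shown in the hunk):

    ev = Event()
    ev += print      # __iadd__ is subscribe
    ev("hello")      # __call__ is notify; runs print("hello")
    print(len(ev))   # __len__ now reports the subscriber count directly
    ev -= print      # __isub__ is unsubscribe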
@@ -176,7 +176,7 @@ class MessageTemplateNotFound(MessageSystemError):
         self.template = template
 
     def __str__(self):
-        return "No message template found, context: '%s'" % self.context
+        return "No message template found for %s, context: '%s'" % (self.template, self.context)
 
 
 class MessageTemplateParsingError(MessageSystemError):
hippolyzer/lib/base/gltftools.py (new file, 528 lines)
@@ -0,0 +1,528 @@
"""
WIP LLMesh -> glTF converter, for testing eventual glTF -> LLMesh conversion logic.
"""
# TODO:
#  * Simple tests
#  * Round-tripping skinning data from Blender-compatible glTF back to LLMesh (maybe through rig retargeting?)
#  * Panda3D-glTF viewer for LLMesh? The glTFs seem to work fine in Panda3D-glTF's `gltf-viewer`.
#  * Check if skew and projection components of transform matrices are ignored in practice as the spec requires.
#    I suppose this would render some real assets impossible to represent with glTF.

import dataclasses
import math
import pprint
import sys
import uuid
from pathlib import Path
from typing import *

import gltflib
import numpy as np
import transformations

from hippolyzer.lib.base.datatypes import Vector3
from hippolyzer.lib.base.mesh import (
    LLMeshSerializer, MeshAsset, positions_from_domain, SkinSegmentDict, VertexWeight, llsd_to_mat4
)
from hippolyzer.lib.base.mesh_skeleton import AVATAR_SKELETON
from hippolyzer.lib.base.serialization import BufferReader


class IdentityList(list):
    """
    List, but does index() by object identity, not equality

    GLTF references objects by their index within some list, but we prefer to pass around
    actual object references internally. If we don't do this, then when we try and get
    a GLTF reference to a given object via `.index()` then we could end up actually getting
    a reference to some other object that just happens to be equal. This was causing issues
    with all primitives ending up with the same material, due to the default material's value
    being the same across all primitives.
    """
    def index(self, value, start: Optional[int] = None, stop: Optional[int] = None) -> int:
        view = self[start:stop]
        for i, x in enumerate(view):
            if x is value:
                if start:
                    return i + start
                return i
        raise ValueError(value)


def sl_to_gltf_coords(coords):
    """
    SL (X, Y, Z) -> GL (X, Z, Y), as GLTF commandeth

    Note that this will only work when reordering axes, flipping an axis is more complicated.
    """
    return coords[0], coords[2], coords[1], *coords[3:]


def sl_to_gltf_uv(uv):
    """Flip the V coordinate of a UV to match glTF convention"""
    return [uv[0], -uv[1]]


def sl_mat4_to_gltf(mat: np.ndarray) -> List[float]:
    """
    Convert an SL Mat4 to the glTF coordinate system

    This should only be done immediately before storing the matrix in a glTF structure!
    """
    # TODO: This is probably not correct. We definitely need to flip Z but there's
    #  probably a better way to do it.
    decomp = [sl_to_gltf_coords(x) for x in transformations.decompose_matrix(mat)]
    trans = decomp[3]
    decomp[3] = (trans[0], trans[1], -trans[2])
    return list(transformations.compose_matrix(*decomp).flatten(order='F'))


# Mat3 to convert points from SL coordinate space to GLTF coordinate space
POINT_TO_GLTF_MAT = transformations.compose_matrix(angles=(-(math.pi / 2), 0, 0))[:3, :3]


def sl_vec3_array_to_gltf(vec_list: np.ndarray) -> np.ndarray:
    new_array = []
    for x in vec_list:
        new_array.append(POINT_TO_GLTF_MAT.dot(x))
    return np.array(new_array)


def sl_weights_to_gltf(sl_weights: List[List[VertexWeight]]) -> Tuple[np.ndarray, np.ndarray]:
    """Convert SL Weights to separate JOINTS_0 and WEIGHTS_0 vec4 arrays"""
    joints = np.zeros((len(sl_weights), 4), dtype=np.uint8)
    weights = np.zeros((len(sl_weights), 4), dtype=np.float32)

    for i, vert_weights in enumerate(sl_weights):
        # We need to re-normalize these since the quantization can mess them up
        collected_weights = []
        for j, vert_weight in enumerate(vert_weights):
            joints[i, j] = vert_weight.joint_idx
            collected_weights.append(vert_weight.weight)
        weight_sum = sum(collected_weights)
        if weight_sum:
            for j, weight in enumerate(collected_weights):
                weights[i, j] = weight / weight_sum

    return joints, weights


def normalize_vec3(a):
    norm = np.linalg.norm(a)
    if norm == 0:
        return a
    return a / norm


def apply_bind_shape_matrix(bind_shape_matrix: np.ndarray, verts: np.ndarray, norms: np.ndarray) \
        -> Tuple[np.ndarray, np.ndarray]:
    """
    Apply the bind shape matrix to the mesh data

    glTF expects all verts and normals to be in armature-local space so that mesh data can be shared
    between differently-oriented armatures. Or something.
    https://github.com/KhronosGroup/glTF-Blender-IO/issues/566#issuecomment-523119339

    glTF also doesn't have a concept of a "bind shape matrix" like Collada does
    per its skinning docs, so we have to mix it into the mesh data manually.
    See https://github.com/KhronosGroup/glTF-Tutorials/blob/master/gltfTutorial/gltfTutorial_020_Skins.md
    """
    scale, _, angles, translation, _ = transformations.decompose_matrix(bind_shape_matrix)
    scale_mat = transformations.compose_matrix(scale=scale)[:3, :3]
    rot_mat = transformations.euler_matrix(*angles)[:3, :3]
    rot_scale_mat = scale_mat @ np.linalg.inv(rot_mat)

    # Apply the SRT transform to each vert
    verts = (verts @ rot_scale_mat) + translation

    # Our scale is unlikely to be uniform, so we have to fix up our normals as well.
    # https://paroj.github.io/gltut/Illumination/Tut09%20Normal%20Transformation.html
    inv_transpose_mat = np.transpose(np.linalg.inv(bind_shape_matrix)[:3, :3])
    new_norms = [normalize_vec3(inv_transpose_mat @ norm) for norm in norms]

    return verts, np.array(new_norms)


@dataclasses.dataclass
class JointContext:
    node: gltflib.Node
    # Original matrix for the bone, may have custom translation, but otherwise the same.
    orig_matrix: np.ndarray
    # xform that must be applied to inverse bind matrices to account for the changed bone
    fixup_matrix: np.ndarray


JOINT_CONTEXT_DICT = Dict[str, JointContext]


class GLTFBuilder:
    def __init__(self, blender_compatibility=False):
        self.scene = gltflib.Scene(nodes=IdentityList())
        self.model = gltflib.GLTFModel(
            asset=gltflib.Asset(version="2.0"),
            accessors=IdentityList(),
            nodes=IdentityList(),
            materials=IdentityList(),
            buffers=IdentityList(),
            bufferViews=IdentityList(),
            meshes=IdentityList(),
            skins=IdentityList(),
            scenes=IdentityList((self.scene,)),
            extensionsUsed=["KHR_materials_specular"],
            scene=0,
        )
        self.gltf = gltflib.GLTF(
            model=self.model,
            resources=IdentityList(),
        )
        self.blender_compatibility = blender_compatibility

    def add_nodes_from_llmesh(self, mesh: MeshAsset, name: str, mesh_transform: Optional[np.ndarray] = None):
        """Build a glTF version of a mesh asset, appending it and its armature to the scene root"""
        # TODO: mesh data instancing?
        #  consider https://github.com/KhronosGroup/glTF-Blender-IO/issues/1634.
        if mesh_transform is None:
            mesh_transform = np.identity(4)

        skin_seg: Optional[SkinSegmentDict] = mesh.segments.get('skin')
        skin = None
        if skin_seg:
            mesh_transform = llsd_to_mat4(skin_seg['bind_shape_matrix'])
            joint_ctxs = self.add_joints(skin_seg)

            # Give our armature a root node and parent the pelvis to it
            armature_node = self.add_node("Armature")
            self.scene.nodes.append(self.model.nodes.index(armature_node))
            armature_node.children.append(self.model.nodes.index(joint_ctxs['mPelvis'].node))
            skin = self.add_skin("Armature", joint_ctxs, skin_seg)
            skin.skeleton = self.model.nodes.index(armature_node)

        primitives = []
        # Just the high LOD for now
        for submesh in mesh.segments['high_lod']:
            verts = np.array(positions_from_domain(submesh['Position'], submesh['PositionDomain']))
            norms = np.array(submesh['Normal'])
            tris = np.array(submesh['TriangleList'])
            joints = np.array([])
            weights = np.array([])
            range_uv = np.array([])
            if "TexCoord0" in submesh:
                range_uv = np.array(positions_from_domain(submesh['TexCoord0'], submesh['TexCoord0Domain']))
            if 'Weights' in submesh:
                joints, weights = sl_weights_to_gltf(submesh['Weights'])

            if skin:
                # Convert verts and norms to armature-local space
                verts, norms = apply_bind_shape_matrix(mesh_transform, verts, norms)

            primitives.append(self.add_primitive(
                tris=tris,
                positions=verts,
                normals=norms,
                uvs=range_uv,
                joints=joints,
                weights=weights,
            ))

        mesh_node = self.add_node(
            name,
            self.add_mesh(name, primitives),
            transform=mesh_transform,
        )
        if skin:
            # Node translation isn't relevant, we're going to use the bind matrices
            # If you pull this into Blender you may want to untick "Guess Original Bind Pose",
            # it guesses that based on the inverse bind matrices which may have Maya poisoning.
            # TODO: Maybe we could automatically undo that by comparing expected bone scale and rot
            #  to scale and rot in the inverse bind matrices, and applying fixups to the
            #  bind shape matrix and inverse bind matrices?
            mesh_node.matrix = None
            mesh_node.skin = self.model.skins.index(skin)

        self.scene.nodes.append(self.model.nodes.index(mesh_node))

    def add_node(
            self,
            name: str,
            mesh: Optional[gltflib.Mesh] = None,
            transform: Optional[np.ndarray] = None,
    ) -> gltflib.Node:
        node = gltflib.Node(
            name=name,
            mesh=self.model.meshes.index(mesh) if mesh else None,
            matrix=sl_mat4_to_gltf(transform) if transform is not None else None,
            children=[],
        )
        self.model.nodes.append(node)
        return node

    def add_mesh(
            self,
            name: str,
            primitives: List[gltflib.Primitive],
    ) -> gltflib.Mesh:
        for i, prim in enumerate(primitives):
            # Give the materials a name relating to what "face" they belong to
            self.model.materials[prim.material].name = f"{name}.{i:03}"
        mesh = gltflib.Mesh(name=name, primitives=primitives)
        self.model.meshes.append(mesh)
        return mesh

    def add_primitive(
            self,
            tris: np.ndarray,
            positions: np.ndarray,
            normals: np.ndarray,
            uvs: np.ndarray,
            weights: np.ndarray,
            joints: np.ndarray,
    ) -> gltflib.Primitive:
        # Make a Material for the primitive. Materials pretty much _are_ the primitives in
        # LLMesh, so just make them both in one go. We need a unique material for each primitive.
        material = gltflib.Material(
            pbrMetallicRoughness=gltflib.PBRMetallicRoughness(
                baseColorFactor=[1.0, 1.0, 1.0, 1.0],
                metallicFactor=0.0,
                roughnessFactor=0.0,
            ),
            extensions={
                "KHR_materials_specular": {
                    "specularFactor": 0.0,
                    "specularColorFactor": [0, 0, 0]
                },
            }
        )
        self.model.materials.append(material)

        attributes = gltflib.Attributes(
            POSITION=self.maybe_add_vec_array(sl_vec3_array_to_gltf(positions), gltflib.AccessorType.VEC3),
            NORMAL=self.maybe_add_vec_array(sl_vec3_array_to_gltf(normals), gltflib.AccessorType.VEC3),
            TEXCOORD_0=self.maybe_add_vec_array(np.array([sl_to_gltf_uv(uv) for uv in uvs]), gltflib.AccessorType.VEC2),
            JOINTS_0=self.maybe_add_vec_array(joints, gltflib.AccessorType.VEC4, gltflib.ComponentType.UNSIGNED_BYTE),
            WEIGHTS_0=self.maybe_add_vec_array(weights, gltflib.AccessorType.VEC4),
        )

        return gltflib.Primitive(
            attributes=attributes,
            indices=self.model.accessors.index(self.add_scalars(tris)),
            material=self.model.materials.index(material),
            mode=gltflib.PrimitiveMode.TRIANGLES,
        )

    def add_scalars(self, scalars: np.ndarray) -> gltflib.Accessor:
        """
        Add a potentially multidimensional array of scalars, returning the accessor

        Generally only used for triangle indices
        """
        scalar_bytes = scalars.astype(np.uint32).flatten().tobytes()
        buffer_view = self.add_buffer_view(scalar_bytes, None)
        accessor = gltflib.Accessor(
            bufferView=self.model.bufferViews.index(buffer_view),
            componentType=gltflib.ComponentType.UNSIGNED_INT,
            count=scalars.size,  # use the flattened size!
            type=gltflib.AccessorType.SCALAR.value,  # type: ignore
            min=[int(scalars.min())],  # type: ignore
            max=[int(scalars.max())],  # type: ignore
        )
        self.model.accessors.append(accessor)
        return accessor

    def maybe_add_vec_array(
            self,
            vecs: np.ndarray,
            vec_type: gltflib.AccessorType,
            component_type: gltflib.ComponentType = gltflib.ComponentType.FLOAT,
    ) -> Optional[int]:
        if not vecs.size:
            return None
        accessor = self.add_vec_array(vecs, vec_type, component_type)
        return self.model.accessors.index(accessor)

    def add_vec_array(
            self,
            vecs: np.ndarray,
            vec_type: gltflib.AccessorType,
            component_type: gltflib.ComponentType = gltflib.ComponentType.FLOAT
    ) -> gltflib.Accessor:
        """
        Add a two-dimensional array of vecs (positions, normals, weights, UVs) returning the accessor

        Vec type may be a vec2, vec3, or a vec4.
        """
        # Pretty much all of these are float32 except the ones that aren't
        dtype = np.float32
        if component_type == gltflib.ComponentType.UNSIGNED_BYTE:
            dtype = np.uint8
        vec_data = vecs.astype(dtype).tobytes()
        buffer_view = self.add_buffer_view(vec_data, target=None)
        accessor = gltflib.Accessor(
            bufferView=self.model.bufferViews.index(buffer_view),
            componentType=component_type,
            count=len(vecs),
            type=vec_type.value,  # type: ignore
            min=vecs.min(axis=0).tolist(),  # type: ignore
            max=vecs.max(axis=0).tolist(),  # type: ignore
        )
        self.model.accessors.append(accessor)
        return accessor

    def add_buffer_view(self, data: bytes, target: Optional[gltflib.BufferTarget]) -> gltflib.BufferView:
        """Create a buffer view and associated buffer and resource for a blob of data"""
        resource = gltflib.FileResource(filename=f"res-{uuid.uuid4()}.bin", data=data)
        self.gltf.resources.append(resource)

        buffer = gltflib.Buffer(uri=resource.filename, byteLength=len(resource.data))
        self.model.buffers.append(buffer)

        buffer_view = gltflib.BufferView(
            buffer=self.model.buffers.index(buffer),
            byteLength=buffer.byteLength,
            byteOffset=0,
            target=target
        )
        self.model.bufferViews.append(buffer_view)
        return buffer_view

    def add_joints(self, skin: SkinSegmentDict) -> JOINT_CONTEXT_DICT:
        # There may be some joints not present in the mesh that we need to add to reach the mPelvis root
        required_joints = set()
        for joint_name in skin['joint_names']:
            joint_node = AVATAR_SKELETON[joint_name]
            required_joints.add(joint_node)
            required_joints.update(joint_node.ancestors)

        # If this is present, it may override the joint positions from the skeleton definition
        if 'alt_inverse_bind_matrix' in skin:
            joint_overrides = dict(zip(skin['joint_names'], skin['alt_inverse_bind_matrix']))
        else:
            joint_overrides = {}

        built_joints: JOINT_CONTEXT_DICT = {}
        for joint in required_joints:
            joint_matrix = joint.matrix

            # Do we have a joint position override that would affect joint_matrix?
            override = joint_overrides.get(joint.name)
            if override:
                decomp = list(transformations.decompose_matrix(joint_matrix))
                # We specifically only want the translation from the override!
                translation = transformations.translation_from_matrix(llsd_to_mat4(override))
                # Only do it if the difference is over 0.1mm though
                if Vector3.dist(Vector3(*translation), joint.translation) > 0.0001:
                    decomp[3] = translation
                    joint_matrix = transformations.compose_matrix(*decomp)

            # Do we need to mess with the bone's matrices to make Blender cooperate?
            orig_matrix = joint_matrix
            fixup_matrix = np.identity(4)
            if self.blender_compatibility:
                joint_matrix, fixup_matrix = self._fix_blender_joint(joint_matrix)

            # TODO: populate "extras" here with the metadata the Blender collada stuff uses to store
            #  "bind_mat" and "rest_mat" so we can go back to our original matrices when exporting
            #  from blender to .dae!
            gltf_joint = self.add_node(joint.name, transform=joint_matrix)

            # Store the node along with any fixups we may need to apply to the bind matrices later
            built_joints[joint.name] = JointContext(gltf_joint, orig_matrix, fixup_matrix)

        # Add each joint to the child list of their respective parent
        for joint_name, joint_ctx in built_joints.items():
            if parent := AVATAR_SKELETON[joint_name].parent:
                built_joints[parent().name].node.children.append(self.model.nodes.index(joint_ctx.node))
        return built_joints

    def _fix_blender_joint(self, joint_matrix: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """
        Split a joint matrix into a joint matrix and fixup matrix

        If we don't account for weird scaling on the collision volumes, then
        Blender freaks out. This is an issue in blender where it doesn't
        apply the inverse bind matrices relative to the scale and rotation of
        the bones themselves, as it should per the glTF spec. Blender's glTF loader
        tries to recover from this by applying certain transforms as a pose, but
        the damage has been done by that point. Nobody else really runs into
        this because they have the good sense to not use some nightmare abomination
        rig with scaling and rotation on the skeleton like SL does.

        Blender will _only_ correctly handle the translation component of the joint,
        any other transforms need to be mixed into the inverse bind matrices themselves.
        There's no internal concept of bone scale or rot in Blender right now.

        Should investigate an Avastar-style approach of optionally retargeting
        to a Blender-compatible rig with translation-only bones, and modify
        the bind matrices to accommodate. The glTF importer supports metadata through
        the "extras" fields, so we can potentially abuse the "bind_mat" metadata field
        that Blender already uses for the "Keep Bind Info" Collada import / export hack.

        For context:
        * https://github.com/KhronosGroup/glTF-Blender-IO/issues/1305
        * https://developer.blender.org/T38660 (these are Collada, but still relevant)
        * https://developer.blender.org/T29246
        * https://developer.blender.org/T50412
        * https://developer.blender.org/T53620 (FBX but still relevant)
        """
        scale, shear, angles, translate, projection = transformations.decompose_matrix(joint_matrix)
        joint_matrix = transformations.compose_matrix(translate=translate)
        fixup_matrix = transformations.compose_matrix(scale=scale, angles=angles)
        return joint_matrix, fixup_matrix

    def add_skin(self, name: str, joint_nodes: JOINT_CONTEXT_DICT, skin_seg: SkinSegmentDict) -> gltflib.Skin:
        joints_arr = []
        for joint_name in skin_seg['joint_names']:
            joint_ctx = joint_nodes[joint_name]
            joints_arr.append(self.model.nodes.index(joint_ctx.node))

        inv_binds = []
        for joint_name, inv_bind in zip(skin_seg['joint_names'], skin_seg['inverse_bind_matrix']):
            joint_ctx = joint_nodes[joint_name]
            inv_bind = joint_ctx.fixup_matrix @ llsd_to_mat4(inv_bind)
            inv_binds.append(sl_mat4_to_gltf(inv_bind))
        inv_binds_data = np.array(inv_binds, dtype=np.float32).tobytes()
        buffer_view = self.add_buffer_view(inv_binds_data, target=None)
        accessor = gltflib.Accessor(
            bufferView=self.model.bufferViews.index(buffer_view),
            componentType=gltflib.ComponentType.FLOAT,
            count=len(inv_binds),
            type=gltflib.AccessorType.MAT4.value,  # type: ignore
        )
        self.model.accessors.append(accessor)
        accessor_idx = self.model.accessors.index(accessor)

        skin = gltflib.Skin(name=name, joints=joints_arr, inverseBindMatrices=accessor_idx)
        self.model.skins.append(skin)
        return skin

    def finalize(self):
        """Clean up the mesh to pass the glTF smell test, should be done last"""
        def _nullify_empty_lists(dc):
            for field in dataclasses.fields(dc):
                # Empty lists should be replaced with None
                if getattr(dc, field.name) == []:
                    setattr(dc, field.name, None)

        for node in self.model.nodes:
            _nullify_empty_lists(node)
        _nullify_empty_lists(self.model)
        return self.gltf


def main():
    # Take an llmesh file as an argument and spit out basename-converted.gltf
    with open(sys.argv[1], "rb") as f:
        reader = BufferReader("<", f.read())

    filename = Path(sys.argv[1]).stem
    mesh: MeshAsset = reader.read(LLMeshSerializer(parse_segment_contents=True))

    builder = GLTFBuilder(blender_compatibility=True)
    builder.add_nodes_from_llmesh(mesh, filename)
    gltf = builder.finalize()

    pprint.pprint(gltf.model)
    gltf.export_glb(sys.argv[1].rsplit(".", 1)[0] + "-converted.gltf")


if __name__ == "__main__":
    main()
@@ -147,7 +147,7 @@ def get_resource_filename(resource_filename: str):
     return pkg_resources.resource_filename("hippolyzer", resource_filename)
 
 
-def to_chunks(chunkable: Sequence[_T], chunk_size: int) -> Generator[_T, None, None]:
+def to_chunks(chunkable: Sequence[_T], chunk_size: int) -> Generator[Sequence[_T], None, None]:
     while chunkable:
         yield chunkable[:chunk_size]
         chunkable = chunkable[chunk_size:]
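The corrected return type reflects that each yielded chunk is a slice of the input sequence, not a single element. For example:

    chunks = list(to_chunks([1, 2, 3, 4, 5], 2))
    assert chunks == [[1, 2], [3, 4], [5]]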
@@ -1,14 +1,19 @@
 import calendar
 import datetime
 import struct
 import typing
 import uuid
 import zlib
 
-from llbase.llsd import *
+from llsd import *
 # So we can directly reference the original wrapper funcs where necessary
-import llbase.llsd
+import llsd as base_llsd
+from llsd.base import is_string, is_unicode
 
 from hippolyzer.lib.base.datatypes import *
 
 
-class HippoLLSDBaseFormatter(llbase.llsd.LLSDBaseFormatter):
+class HippoLLSDBaseFormatter(base_llsd.base.LLSDBaseFormatter):
     UUID: callable
     ARRAY: callable
 
@@ -24,12 +29,12 @@ class HippoLLSDBaseFormatter(llbase.llsd.LLSDBaseFormatter):
         return self.ARRAY(v.data())
 
 
-class HippoLLSDXMLFormatter(llbase.llsd.LLSDXMLFormatter, HippoLLSDBaseFormatter):
+class HippoLLSDXMLFormatter(base_llsd.serde_xml.LLSDXMLFormatter, HippoLLSDBaseFormatter):
     def __init__(self):
         super().__init__()
 
 
-class HippoLLSDXMLPrettyFormatter(llbase.llsd.LLSDXMLPrettyFormatter, HippoLLSDBaseFormatter):
+class HippoLLSDXMLPrettyFormatter(base_llsd.serde_xml.LLSDXMLPrettyFormatter, HippoLLSDBaseFormatter):
     def __init__(self):
         super().__init__()
 
@@ -42,7 +47,7 @@ def format_xml(val: typing.Any):
     return HippoLLSDXMLFormatter().format(val)
 
 
-class HippoLLSDNotationFormatter(llbase.llsd.LLSDNotationFormatter, HippoLLSDBaseFormatter):
+class HippoLLSDNotationFormatter(base_llsd.serde_notation.LLSDNotationFormatter, HippoLLSDBaseFormatter):
     def __init__(self):
         super().__init__()
 
@@ -84,7 +89,7 @@ def _format_binary_recurse(something) -> bytes:
             return b'1'
         else:
             return b'0'
-    elif is_integer(something):
+    elif isinstance(something, int):
         try:
             return b'i' + struct.pack('!i', something)
         except (OverflowError, struct.error) as exc:
@@ -129,7 +134,7 @@ def _format_binary_recurse(something) -> bytes:
             (type(something), something))
 
 
-class HippoLLSDBinaryParser(llbase.llsd.LLSDBinaryParser):
+class HippoLLSDBinaryParser(base_llsd.serde_binary.LLSDBinaryParser):
     def __init__(self):
         super().__init__()
         self._dispatch[ord('u')] = lambda: UUID(bytes=self._getc(16))
@@ -162,11 +167,11 @@ def parse_binary(data: bytes):
 
 
 def parse_xml(data: bytes):
-    return llbase.llsd.parse_xml(data)
+    return base_llsd.parse_xml(data)
 
 
 def parse_notation(data: bytes):
-    return llbase.llsd.parse_notation(data)
+    return base_llsd.parse_notation(data)
 
 
 def zip_llsd(val: typing.Any):
@@ -189,6 +194,6 @@ def parse(data: bytes):
         else:
             return parse_notation(data)
     except KeyError as e:
-        raise llbase.llsd.LLSDParseError('LLSD could not be parsed: %s' % (e,))
+        raise base_llsd.LLSDParseError('LLSD could not be parsed: %s' % (e,))
    except TypeError as e:
-        raise llbase.llsd.LLSDParseError('Input stream not of type bytes. %s' % (e,))
+        raise base_llsd.LLSDParseError('Input stream not of type bytes. %s' % (e,))
@@ -11,15 +11,25 @@ from typing import *
 import zlib
 from copy import deepcopy
 
+import numpy as np
 import recordclass
 
 from hippolyzer.lib.base import serialization as se
 from hippolyzer.lib.base.datatypes import Vector3, Vector2, UUID, TupleCoord
 from hippolyzer.lib.base.llsd import zip_llsd, unzip_llsd
+from hippolyzer.lib.base.serialization import ParseContext
 
 LOG = logging.getLogger(__name__)
 
 
+def llsd_to_mat4(mat: Union[np.ndarray, Sequence[float]]) -> np.ndarray:
+    return np.array(mat).reshape((4, 4), order='F')
+
+
+def mat4_to_llsd(mat: np.ndarray) -> List[float]:
+    return list(mat.flatten(order='F'))
+
+
 @dataclasses.dataclass
 class MeshAsset:
     header: MeshHeaderDict = dataclasses.field(default_factory=dict)
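The `llsd_to_mat4()` / `mat4_to_llsd()` helpers moving in here (from the Collada module, per the earlier hunk) round-trip a flat 16-element LLSD list through a 4x4 numpy matrix in column-major ('F') order, so the first four values become the first column:

    mat = llsd_to_mat4(list(range(16)))
    assert list(mat[:, 0]) == [0, 1, 2, 3]       # column-major fill
    assert mat4_to_llsd(mat) == list(range(16))  # lossless round-trip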
@@ -168,7 +178,7 @@ class DomainDict(TypedDict):
|
||||
Min: List[float]
|
||||
|
||||
|
||||
class VertexWeight(recordclass.datatuple): # type: ignore
|
||||
class VertexWeight(recordclass.RecordClass):
|
||||
"""Vertex weight for a specific joint on a specific vertex"""
|
||||
# index of the joint within the joint_names list in the skin segment
|
||||
joint_idx: int
|
||||
@@ -255,7 +265,6 @@ def positions_to_domain(positions: Iterable[TupleCoord], domain: DomainDict):
|
||||
|
||||
class VertexWeights(se.SerializableBase):
|
||||
"""Serializer for a list of joint weights on a single vertex"""
|
||||
INFLUENCE_SER = se.QuantizedFloat(se.U16, 0.0, 1.0)
|
||||
INFLUENCE_LIMIT = 4
|
||||
INFLUENCE_TERM = 0xFF
|
||||
|
||||
@@ -266,18 +275,30 @@ class VertexWeights(se.SerializableBase):
        for val in vals:
            joint_idx, influence = val
            writer.write(se.U8, joint_idx)
            writer.write(cls.INFLUENCE_SER, influence, ctx=ctx)
            writer.write(se.U16, round(influence * 0xFFff), ctx=ctx)
        if len(vals) != cls.INFLUENCE_LIMIT:
            writer.write(se.U8, cls.INFLUENCE_TERM)

    @classmethod
    def deserialize(cls, reader: se.Reader, ctx=None):
        # NOTE: normally you'd want to do something like arrange this into a nicely
        # aligned byte array with zero padding so that you could vectorize the decoding.
        # In cases where having a vertex with no weights is semantically equivalent to
        # having a vertex _with_ weights of a value of 0.0 that's fine. This isn't the case
        # in LL's implementation of mesh:
        #
        # https://bitbucket.org/lindenlab/viewer/src/d31a83fb946c49a38376ea3b312b5380d0c8c065/indra/llmath/llvolume.cpp#lines-2560:2628
        #
        # Consider the difference between handling of b"\x00\x00\x00\xFF" and b"\xFF" with the above logic.
        # To simplify round-tripping while preserving those semantics, we don't do a vectorized decode.
        # I had a vectorized numpy version, but those requirements made everything a bit of a mess.
        influence_list = []
        for _ in range(cls.INFLUENCE_LIMIT):
            joint_idx = reader.read(se.U8)
            joint_idx = reader.read_bytes(1)[0]
            if joint_idx == cls.INFLUENCE_TERM:
                break
            influence_list.append(VertexWeight(joint_idx, reader.read(cls.INFLUENCE_SER, ctx=ctx)))
            weight = reader.read(se.U16, ctx=ctx) / 0xFFff
            influence_list.append(VertexWeight(joint_idx, weight))
        return influence_list

@@ -312,16 +333,46 @@ class SegmentSerializer:
        return new_segment


class VecListAdapter(se.Adapter):
    def __init__(self, child_spec: se.SERIALIZABLE_TYPE, vec_type: Type):
        super().__init__(child_spec)
        self.vec_type = vec_type

    def encode(self, val: Any, ctx: Optional[ParseContext]) -> Any:
        return val

    def decode(self, val: Any, ctx: Optional[ParseContext], pod: bool = False) -> Any:
        new_vals = []
        for elem in val:
            new_vals.append(self.vec_type(*elem))
        return new_vals


LE_U16: np.dtype = np.dtype(np.uint16).newbyteorder('<')  # noqa


LOD_SEGMENT_SERIALIZER = SegmentSerializer({
    # 16-bit indices to the verts making up the tri. Imposes a 16-bit
    # upper limit on verts in any given material in the mesh.
    "TriangleList": se.Collection(None, se.Collection(3, se.U16)),
    "TriangleList": se.ExprAdapter(
        se.NumPyArray(se.BytesGreedy(), LE_U16, 3),
        decode_func=lambda x: x.tolist(),
    ),
    # These are used to interpolate between values in their respective domains
    # Each position represents a single vert.
    "Position": se.Collection(None, se.Vector3U16(0.0, 1.0)),
    "TexCoord0": se.Collection(None, se.Vector2U16(0.0, 1.0)),
    # Normals have a static domain between -1 and 1, so just use that.
    "Normal": se.Collection(None, se.Vector3U16(-1.0, 1.0)),
    "Position": VecListAdapter(
        se.QuantizedNumPyArray(se.NumPyArray(se.BytesGreedy(), LE_U16, 3), 0.0, 1.0),
        Vector3,
    ),
    "TexCoord0": VecListAdapter(
        se.QuantizedNumPyArray(se.NumPyArray(se.BytesGreedy(), LE_U16, 2), 0.0, 1.0),
        Vector2,
    ),
    # Normals have a static domain between -1 and 1, so we just use that rather than 0.0 - 1.0.
    "Normal": VecListAdapter(
        se.QuantizedNumPyArray(se.NumPyArray(se.BytesGreedy(), LE_U16, 3), -1.0, 1.0),
        Vector3,
    ),
    "Weights": se.Collection(None, VertexWeights)
})

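As an illustrative aside before the next file: the U16 quantization these LOD serializers rely on maps a float range onto 0..65535. A minimal sketch in plain numpy (not Hippolyzer's API, just the arithmetic):

import numpy as np

def quantize_u16(vals, lower, upper):
    """Map floats in [lower, upper] onto uint16, like the position encoder above."""
    scaled = (np.asarray(vals, dtype=np.float64) - lower) / (upper - lower)
    return np.rint(scaled * 0xFFFF).astype(np.uint16)

def dequantize_u16(vals, lower, upper):
    return vals.astype(np.float64) / 0xFFFF * (upper - lower) + lower

pos = np.array([[0.0, 0.1, 0.0], [1.0, 1.0, 1.0]])
round_tripped = dequantize_u16(quantize_u16(pos, 0.0, 1.0), 0.0, 1.0)
assert np.allclose(pos, round_tripped, atol=1e-4)  # roughly 5 decimal places survive
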
121
hippolyzer/lib/base/mesh_skeleton.py
Normal file
@@ -0,0 +1,121 @@
from __future__ import annotations

import dataclasses
import weakref
from typing import *

import transformations
from lxml import etree

from hippolyzer.lib.base.datatypes import Vector3, RAD_TO_DEG
from hippolyzer.lib.base.helpers import get_resource_filename


MAYBE_JOINT_REF = Optional[Callable[[], "JointNode"]]
SKELETON_REF = Optional[Callable[[], "Skeleton"]]


@dataclasses.dataclass
class JointNode:
    name: str
    parent: MAYBE_JOINT_REF
    skeleton: SKELETON_REF
    translation: Vector3
    pivot: Vector3  # pivot point for the joint, generally the same as translation
    rotation: Vector3  # Euler rotation in degrees
    scale: Vector3
    type: str  # bone or collision_volume

    def __hash__(self):
        return hash((self.name, self.type))

    @property
    def matrix(self):
        return transformations.compose_matrix(
            scale=tuple(self.scale),
            angles=tuple(self.rotation / RAD_TO_DEG),
            translate=tuple(self.translation),
        )

    @property
    def index(self) -> int:
        bone_idx = 0
        for node in self.skeleton().joint_dict.values():
            if node.type != "bone":
                continue
            if self is node:
                return bone_idx
            bone_idx += 1
        raise KeyError(f"{self.name!r} doesn't exist in skeleton")

    @property
    def ancestors(self) -> Sequence[JointNode]:
        joint_node = self
        ancestors = []
        while joint_node.parent:
            joint_node = joint_node.parent()
            ancestors.append(joint_node)
        return ancestors

    @property
    def children(self) -> Sequence[JointNode]:
        children = []
        for node in self.skeleton().joint_dict.values():
            if node.parent and node.parent() == self:
                children.append(node)
        return children

    @property
    def descendents(self) -> Set[JointNode]:
        descendents = set()
        ancestors = {self}
        last_ancestors = set()
        while last_ancestors != ancestors:
            last_ancestors = ancestors
            for node in self.skeleton().joint_dict.values():
                if node.parent and node.parent() in ancestors:
                    ancestors.add(node)
                    descendents.add(node)
        return descendents


class Skeleton:
    def __init__(self, root_node: etree.ElementBase):
        self.joint_dict: Dict[str, JointNode] = {}
        self._parse_node_children(root_node, None)

    def __getitem__(self, item: str) -> JointNode:
        return self.joint_dict[item]

    def _parse_node_children(self, node: etree.ElementBase, parent: MAYBE_JOINT_REF):
        name = node.get('name')
        joint = JointNode(
            name=name,
            parent=parent,
            skeleton=weakref.ref(self),
            translation=_get_vec_attr(node, "pos", Vector3()),
            pivot=_get_vec_attr(node, "pivot", Vector3()),
            rotation=_get_vec_attr(node, "rot", Vector3()),
            scale=_get_vec_attr(node, "scale", Vector3(1, 1, 1)),
            type=node.tag,
        )
        self.joint_dict[name] = joint
        for child in node.iterchildren():
            self._parse_node_children(child, weakref.ref(joint))


def _get_vec_attr(node, attr_name: str, default: Vector3) -> Vector3:
    attr_val = node.get(attr_name, None)
    if not attr_val:
        return default
    return Vector3(*(float(x) for x in attr_val.split(" ") if x))


def load_avatar_skeleton() -> Skeleton:
    skel_path = get_resource_filename("lib/base/data/avatar_skeleton.xml")
    with open(skel_path, 'r') as f:
        skel_root = etree.fromstring(f.read())
    return Skeleton(skel_root.getchildren()[0])


AVATAR_SKELETON = load_avatar_skeleton()

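A hedged usage sketch for the new skeleton module above; the joint names and weakref-calling convention come from the module itself and the new tests at the bottom of this diff:

from hippolyzer.lib.base.mesh_skeleton import AVATAR_SKELETON

neck = AVATAR_SKELETON["mNeck"]
print(neck.parent().name)                # "mChest", per the bundled avatar_skeleton.xml
print(neck.index)                        # position among "bone"-type joints only
print(neck.matrix)                       # 4x4 scale/rotate/translate composition
print([j.name for j in neck.ancestors])  # walk the parent weakrefs up to the root
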
@@ -77,9 +77,6 @@ class Circuit:
        )
        return self._send_prepared_message(message, transport)

    # Temporary alias
    send_message = send

    def send_reliable(self, message: Message, transport=None) -> asyncio.Future:
        """send() wrapper that always sends reliably and allows `await`ing ACK receipt"""
        if not message.synthetic:

@@ -78,7 +78,7 @@ class TemplateDataPacker:
    MsgType.MVT_S8: _make_struct_spec('b'),
    MsgType.MVT_U8: _make_struct_spec('B'),
    MsgType.MVT_BOOL: _make_struct_spec('B'),
    MsgType.MVT_LLUUID: (lambda x: UUID(bytes=bytes(x)), lambda x: x.bytes),
    MsgType.MVT_LLUUID: (lambda x: UUID(bytes=bytes(x)), lambda x: UUID(x).bytes),
    MsgType.MVT_IP_ADDR: (socket.inet_ntoa, socket.inet_aton),
    MsgType.MVT_IP_PORT: _make_struct_spec('!H'),
    MsgType.MVT_U16: _make_struct_spec('<H'),

@@ -222,7 +222,7 @@ class Message:
    def add_blocks(self, block_list):
        # can have a list of blocks if it is multiple or variable
        for block in block_list:
            if type(block) == list:
            if type(block) is list:
                for bl in block:
                    self.add_block(bl)
            else:

@@ -20,7 +20,7 @@ Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from logging import getLogger

from llbase import llsd
import llsd

from hippolyzer.lib.base.message.data import msg_details

@@ -126,7 +126,7 @@ class UDPMessageDeserializer:
        frequency, num = _parse_msg_num(reader)
        current_template = self.template_dict.get_template_by_pair(frequency, num)
        if current_template is None:
            raise exc.MessageTemplateNotFound("deserializing data")
            raise exc.MessageTemplateNotFound("deserializing data", f"{frequency}:{num}")
        msg.name = current_template.name

        # extra field, see note regarding msg.offset

@@ -157,7 +157,6 @@ class UDPMessageDeserializer:
        reader.seek(current_template.get_msg_freq_num_len() + msg.offset)

        for tmpl_block in current_template.blocks:
            LOG.debug("Parsing %s:%s" % (msg.name, tmpl_block.name))
            # EOF?
            if not len(reader):
                # Seems like even some "Single" blocks are optional?

@@ -180,7 +179,6 @@ class UDPMessageDeserializer:

            for i in range(repeat_count):
                current_block = Block(tmpl_block.name)
                LOG.debug("Adding block %s" % current_block.name)
                msg.add_block(current_block)

                for tmpl_variable in tmpl_block.variables:

@@ -46,6 +46,9 @@ class UDPPacket:
            return self.dst_addr
        return self.src_addr

    def __repr__(self):
        return f"<{self.__class__.__name__} src_addr={self.src_addr!r} dst_addr={self.dst_addr!r} data={self.data!r}>"


class AbstractUDPTransport(abc.ABC):
    __slots__ = ()

@@ -35,12 +35,7 @@ import hippolyzer.lib.base.serialization as se
import hippolyzer.lib.base.templates as tmpls


class Object(recordclass.datatuple):  # type: ignore
    __options__ = {
        "use_weakref": True,
    }
    __weakref__: Any

class Object(recordclass.RecordClass, use_weakref=True):  # type: ignore
    LocalID: Optional[int] = None
    State: Optional[int] = None
    FullID: Optional[UUID] = None

@@ -199,6 +194,28 @@ class Object(recordclass.datatuple):  # type: ignore
            del val["Parent"]
        return val

    @property
    def Ancestors(self) -> List[Object]:
        obj = self
        ancestors = []
        while obj.Parent:
            obj = obj.Parent
            ancestors.append(obj)
        return ancestors

    @property
    def Descendents(self) -> List[Object]:
        new_children = [self]
        descendents = []
        while new_children:
            to_check = new_children[:]
            new_children.clear()
            for obj in to_check:
                for child in obj.Children:
                    new_children.append(child)
                    descendents.append(child)
        return descendents


def handle_to_gridxy(handle: int) -> Tuple[int, int]:
    return (handle >> 32) // 256, (handle & 0xFFffFFff) // 256

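To make the bit math in handle_to_gridxy() concrete (illustrative numbers, not from the diff): a region handle packs the region's global meter coordinates into two 32-bit halves, and dividing each half by the 256m region size yields grid coordinates.

# Hypothetical region at grid (1000, 1035): global meters are grid * 256
handle = (1000 * 256) << 32 | (1035 * 256)
assert ((handle >> 32) // 256, (handle & 0xFFFFFFFF) // 256) == (1000, 1035)
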
@@ -10,6 +10,7 @@ from io import SEEK_CUR, SEEK_SET, SEEK_END, RawIOBase, BufferedIOBase
from typing import *

import lazy_object_proxy
import numpy as np

import hippolyzer.lib.base.llsd as llsd
import hippolyzer.lib.base.datatypes as dtypes

@@ -838,7 +839,7 @@ class QuantizedFloat(QuantizedFloatBase):
        super().__init__(prim_spec, zero_median=False)
        self.lower = lower
        self.upper = upper
        # We know the range in `QuantizedFloat` when it's constructed, so we can infer
        # whether or not we should round towards zero in __init__
        max_error = (upper - lower) * self.step_mag
        midpoint = (upper + lower) / 2.0

@@ -1610,7 +1611,9 @@ class BitfieldDataclass(DataclassAdapter):


class ExprAdapter(Adapter):
    def __init__(self, child_spec: SERIALIZABLE_TYPE, decode_func: Callable, encode_func: Callable):
    _ID = lambda x: x

    def __init__(self, child_spec: SERIALIZABLE_TYPE, decode_func: Callable = _ID, encode_func: Callable = _ID):
        super().__init__(child_spec)
        self._decode_func = decode_func
        self._encode_func = encode_func

@@ -1659,6 +1662,62 @@ class BinaryLLSD(SerializableBase):
        writer.write_bytes(llsd.format_binary(val, with_header=False))


class NumPyArray(Adapter):
    """
    A 2-dimensional, dynamic-length array of data from numpy. Greedy.

    Unlike most other serializers, your endianness _must_ be specified in the dtype!
    """
    __slots__ = ['dtype', 'elems']

    def __init__(self, child_spec: Optional[SERIALIZABLE_TYPE], dtype: np.dtype, elems: int):
        super().__init__(child_spec)
        self.dtype = dtype
        self.elems = elems

    def _pick_dtype(self, endian: str) -> np.dtype:
        return self.dtype.newbyteorder('>') if endian != "<" else self.dtype

    def decode(self, val: Any, ctx: Optional[ParseContext], pod: bool = False) -> Any:
        num_elems = len(val) // self.dtype.itemsize
        num_ndims = num_elems // self.elems
        buf_array = np.frombuffer(val, dtype=self.dtype, count=num_elems)
        return buf_array.reshape((num_ndims, self.elems))

    def encode(self, val, ctx: Optional[ParseContext]) -> Any:
        val: np.ndarray = np.array(val, dtype=self.dtype).flatten()
        return val.tobytes()


class QuantizedNumPyArray(Adapter):
    """Like QuantizedFloat. Only works correctly for unsigned types, no zero midpoint rounding!"""
    def __init__(self, child_spec: NumPyArray, lower: float, upper: float):
        super().__init__(child_spec)
        self.dtype = child_spec.dtype
        self.lower = lower
        self.upper = upper
        self.step_mag = 1.0 / ((2 ** (self.dtype.itemsize * 8)) - 1)

    def encode(self, val: Any, ctx: Optional[ParseContext]) -> Any:
        val = np.array(val, dtype=np.float64)
        val = np.clip(val, self.lower, self.upper)
        delta = self.upper - self.lower
        if delta == 0.0:
            return np.zeros(val.shape, dtype=self.dtype)

        val -= self.lower
        val /= delta
        val /= self.step_mag
        return np.rint(val).astype(self.dtype)

    def decode(self, val: Any, ctx: Optional[ParseContext], pod: bool = False) -> Any:
        val = val.astype(np.float64)
        val *= self.step_mag
        val *= self.upper - self.lower
        val += self.lower
        return val


def subfield_serializer(msg_name, block_name, var_name):
    def f(orig_cls):
        global SUBFIELD_SERIALIZERS

@@ -1858,7 +1917,7 @@ class IntEnumSubfieldSerializer(AdapterInstanceSubfieldSerializer):
        val = super().deserialize(ctx_obj, val, pod=pod)
        # Don't pretend we were able to deserialize this if we
        # had to fall through to the `int` case.
        if pod and type(val) == int:
        if pod and type(val) is int:
            return UNSERIALIZABLE
        return val

@@ -4,14 +4,17 @@ Serialization templates for structures used in LLUDP and HTTP bodies.

import abc
import collections
import copy
import dataclasses
import datetime
import enum
import math
import zlib
from typing import *

import hippolyzer.lib.base.serialization as se
from hippolyzer.lib.base import llsd
from hippolyzer.lib.base.datatypes import UUID, IntEnum, IntFlag, Vector3
from hippolyzer.lib.base.datatypes import UUID, IntEnum, IntFlag, Vector3, Quaternion
from hippolyzer.lib.base.namevalue import NameValuesSerializer

@@ -1249,7 +1252,7 @@ class TextureEntryCollection:
            vals = getattr(self, key)
            # First, give all faces the default value for this key
            for te in as_dicts:
                te[key] = vals[None]
                te[key] = copy.copy(vals[None])
            # Walk over the exception cases and replace the default value
            for face_nums, val in vals.items():
                # Default case already handled

@@ -1258,7 +1261,7 @@ class TextureEntryCollection:
                for face_num in face_nums:
                    if face_num >= num_faces:
                        raise ValueError(f"Bad value for num_faces? {face_num} >= {num_faces}")
                    as_dicts[face_num][key] = val
                    as_dicts[face_num][key] = copy.copy(val)
        return [TextureEntry(**x) for x in as_dicts]

    @classmethod

@@ -1856,6 +1859,8 @@ class AvatarPropertiesFlags(IntFlag):
@se.flag_field_serializer("AvatarGroupsReply", "GroupData", "GroupPowers")
@se.flag_field_serializer("AvatarGroupDataUpdate", "GroupData", "GroupPowers")
@se.flag_field_serializer("AvatarDataUpdate", "AgentDataData", "GroupPowers")
@se.flag_field_serializer("GroupProfileReply", "GroupData", "PowersMask")
@se.flag_field_serializer("GroupRoleDataReply", "RoleData", "Powers")
class GroupPowerFlags(IntFlag):
    MEMBER_INVITE = 1 << 1  # Invite member
    MEMBER_EJECT = 1 << 2  # Eject member from group

@@ -1945,6 +1950,15 @@ class GroupPowerFlags(IntFlag):
    GROUP_BAN_ACCESS = 1 << 51  # Allows access to ban / un-ban agents from a group.


@se.flag_field_serializer("GrantUserRights", "Rights", "RelatedRights")
@se.flag_field_serializer("ChangeUserRights", "Rights", "RelatedRights")
class UserRelatedRights(IntFlag):
    """See lluserrelations.h for definitions"""
    ONLINE_STATUS = 1
    MAP_LOCATION = 1 << 1
    MODIFY_OBJECTS = 1 << 2


@se.flag_field_serializer("RequestObjectPropertiesFamily", "ObjectData", "RequestFlags")
@se.flag_field_serializer("ObjectPropertiesFamily", "ObjectData", "RequestFlags")
class ObjectPropertiesFamilyRequestFlags(IntFlag):

@@ -2025,6 +2039,50 @@ class ScriptPermissions(IntFlag):
    CHANGE_ENVIRONMENT = 1 << 18


@se.enum_field_serializer("UpdateMuteListEntry", "MuteData", "MuteType")
class MuteType(IntEnum):
    BY_NAME = 0
    AGENT = 1
    OBJECT = 2
    GROUP = 3
    # Voice, presumably.
    EXTERNAL = 4


@se.flag_field_serializer("UpdateMuteListEntry", "MuteData", "MuteFlags")
class MuteFlags(IntFlag):
    # For backwards compatibility (since any mute list entries that were created before the flags existed
    # will have a flags field of 0), some flags are "inverted".
    # Note that it's possible, through flags, to completely disable an entry in the mute list.
    # The code should detect this case and remove the mute list entry instead.
    TEXT_CHAT = 1 << 0
    VOICE_CHAT = 1 << 1
    PARTICLES = 1 << 2
    OBJECT_SOUNDS = 1 << 3

    @property
    def DEFAULT(self):
        return 0x0

    @property
    def ALL(self):
        return 0xF


class CreationDateAdapter(se.Adapter):
    def decode(self, val: Any, ctx: Optional[se.ParseContext], pod: bool = False) -> Any:
        return datetime.datetime.fromtimestamp(val / 1_000_000).isoformat()

    def encode(self, val: Any, ctx: Optional[se.ParseContext]) -> Any:
        return int(datetime.datetime.fromisoformat(val).timestamp() * 1_000_000)


@se.subfield_serializer("ObjectProperties", "ObjectData", "CreationDate")
class CreationDateSerializer(se.AdapterSubfieldSerializer):
    ADAPTER = CreationDateAdapter(None)
    ORIG_INLINE = True


@se.http_serializer("RenderMaterials")
class RenderMaterialsSerializer(se.BaseHTTPSerializer):
    @classmethod

@@ -2055,3 +2113,69 @@ class RetrieveNavMeshSrcSerializer(se.BaseHTTPSerializer):
        # 15 bit window size, gzip wrapped
        deser["navmesh_data"] = zlib.decompress(deser["navmesh_data"], wbits=15 | 32)
        return deser


# Beta puppetry stuff, subject to change!


class PuppetryEventMask(enum.IntFlag):
    POSITION = 1 << 0
    POSITION_IN_PARENT_FRAME = 1 << 1
    ROTATION = 1 << 2
    ROTATION_IN_PARENT_FRAME = 1 << 3
    SCALE = 1 << 4
    DISABLE_CONSTRAINT = 1 << 7


class PuppetryOption(se.OptionalFlagged):
    def __init__(self, flag_val, spec):
        super().__init__("mask", se.IntFlag(PuppetryEventMask, se.U8), flag_val, spec)


# Range to use for puppetry's quantized floats when converting to<->from U16
LL_PELVIS_OFFSET_RANGE = (-5.0, 5.0)


@dataclasses.dataclass
class PuppetryJointData:
    # Where does this number come from? `avatar_skeleton.xml`?
    joint_id: int = se.dataclass_field(se.S16)
    # Determines which fields will follow
    mask: PuppetryEventMask = se.dataclass_field(se.IntFlag(PuppetryEventMask, se.U8))
    rotation: Optional[Quaternion] = se.dataclass_field(
        # These are very odd scales for a quantized quaternion, but that's what they are.
        PuppetryOption(PuppetryEventMask.ROTATION, se.PackedQuat(se.Vector3U16(*LL_PELVIS_OFFSET_RANGE))),
    )
    position: Optional[Vector3] = se.dataclass_field(
        PuppetryOption(PuppetryEventMask.POSITION, se.Vector3U16(*LL_PELVIS_OFFSET_RANGE)),
    )
    scale: Optional[Vector3] = se.dataclass_field(
        PuppetryOption(PuppetryEventMask.SCALE, se.Vector3U16(*LL_PELVIS_OFFSET_RANGE)),
    )


@dataclasses.dataclass
class PuppetryEventData:
    time: int = se.dataclass_field(se.S32)
    # Must be set manually due to below issue
    num_joints: int = se.dataclass_field(se.U16)
    # This field is packed in the least helpful way possible. The length field
    # is in between the collection count and the collection data, but the length
    # field essentially only tells you how many bytes until the end of the buffer
    # proper, which you already know from msgsystem. Why is this here?
    joints: List[PuppetryJointData] = se.dataclass_field(se.TypedByteArray(
        se.U32,
        # Just treat contents as a greedy collection, tries to keep reading until EOF
        se.Collection(None, se.Dataclass(PuppetryJointData)),
    ))


@se.subfield_serializer("AgentAnimation", "PhysicalAvatarEventList", "TypeData")
@se.subfield_serializer("AvatarAnimation", "PhysicalAvatarEventList", "TypeData")
class PuppetryEventDataSerializer(se.SimpleSubfieldSerializer):
    # You can have multiple joint events packed in right after one another, implicitly.
    # They may _or may not_ be split into separate PhysicalAvatarEventList blocks?
    # This doesn't seem to be handled specifically in the decoder, is this a
    # serialization bug in the viewer?
    TEMPLATE = se.Collection(None, se.Dataclass(PuppetryEventData))
    EMPTY_IS_NONE = True

@@ -35,9 +35,8 @@ class VisualParam:


class VisualParams(List[VisualParam]):
    def __init__(self):
    def __init__(self, lad_path):
        super().__init__()
        lad_path = get_resource_filename("lib/base/data/avatar_lad.xml")
        with open(lad_path, "rb") as f:
            doc = parse_etree(f)
            for param in doc.findall(".//param"):

@@ -59,8 +58,11 @@ class VisualParams(List[VisualParam]):
    def by_wearable(self, wearable: str) -> List[VisualParam]:
        return [x for x in self if x.wearable == wearable]

    def by_id(self, vparam_id: int) -> VisualParam:
        return [x for x in self if x.id == vparam_id][0]


VISUAL_PARAMS = VisualParams()
VISUAL_PARAMS = VisualParams(get_resource_filename("lib/base/data/avatar_lad.xml"))


@dataclasses.dataclass

@@ -1,4 +1,4 @@
from typing import NamedTuple, Union, Optional
from typing import NamedTuple, Union, Optional, List

import hippolyzer.lib.base.serialization as se
from hippolyzer.lib.base import llsd

@@ -18,6 +18,11 @@ class UploadToken(NamedTuple):
    payload: bytes


class MeshUploadDetails(NamedTuple):
    mesh_bytes: bytes
    num_faces: int


class AssetUploader:
    def __init__(self, region: BaseClientRegion):
        self._region = region

@@ -69,20 +74,15 @@ class AssetUploader:
        """
        pass

    # The mesh upload flow is a little special, so it gets its own methods
    async def initiate_mesh_upload(self, name: str, mesh: Union[bytes, MeshAsset],
    # The mesh upload flow is a little special, so it gets its own method
    async def initiate_mesh_upload(self, name: str, mesh: Union[MeshUploadDetails, MeshAsset],
                                   flags: Optional[int] = None) -> UploadToken:
        """
        Very basic LL-serialized mesh uploader

        Currently only handles a single mesh with a single face and no associated textures.
        """
        if isinstance(mesh, MeshAsset):
            writer = se.BufferWriter("!")
            writer.write(LLMeshSerializer(), mesh)
            mesh = writer.copy_buffer()
            mesh = MeshUploadDetails(writer.copy_buffer(), len(mesh.segments['high_lod']))

        asset_resources = self._build_asset_resources(name, mesh)
        asset_resources = self._build_asset_resources(name, [mesh])
        payload = {
            'asset_resources': asset_resources,
            'asset_type': 'mesh',

@@ -102,26 +102,26 @@ class AssetUploader:
        upload_body = llsd.format_xml(asset_resources)
        return UploadToken(resp_payload["upload_price"], resp_payload["uploader"], upload_body)

    def _build_asset_resources(self, name: str, mesh: bytes) -> dict:
    def _build_asset_resources(self, name: str, meshes: List[MeshUploadDetails]) -> dict:
        instances = []
        for mesh in meshes:
            instances.append({
                'face_list': [{
                    'diffuse_color': [1.0, 1.0, 1.0, 1.0],
                    'fullbright': False
                }] * mesh.num_faces,
                'material': 3,
                'mesh': 0,
                'mesh_name': name,
                'physics_shape_type': 2,
                'position': [0.0, 0.0, 0.0],
                'rotation': [0.7071067690849304, 0.0, 0.0, 0.7071067690849304],
                'scale': [1.0, 1.0, 1.0]
            })

        return {
            'instance_list': [
                {
                    'face_list': [
                        {
                            'diffuse_color': [1.0, 1.0, 1.0, 1.0],
                            'fullbright': False
                        }
                    ],
                    'material': 3,
                    'mesh': 0,
                    'mesh_name': name,
                    'physics_shape_type': 2,
                    'position': [0.0, 0.0, 0.0],
                    'rotation': [0.7071067690849304, 0.0, 0.0, 0.7071067690849304],
                    'scale': [1.0, 1.0, 1.0]
                }
            ],
            'mesh_list': [mesh],
            'instance_list': instances,
            'mesh_list': [mesh.mesh_bytes for mesh in meshes],
            'metric': 'MUT_Unspecified',
            'texture_list': []
        }

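An illustrative call into the updated upload flow, for context only. `uploader` (an AssetUploader for the current region) and `slm_payload` are assumed; `MeshUploadDetails`, `initiate_mesh_upload()`, and `UploadToken.payload` come from the diff above:

# Inside an async context: upload a previously serialized mesh with two faces
details = MeshUploadDetails(mesh_bytes=slm_payload, num_faces=2)
token = await uploader.initiate_mesh_upload("my mesh", details)
print(len(token.payload))  # LLSD body to POST to the returned uploader cap
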
@@ -297,7 +297,8 @@ class ClientWorldObjectManager:
        self._rebuild_avatar_objects()
        self._region_managers.clear()

    def _update_existing_object(self, obj: Object, new_properties: dict, update_type: ObjectUpdateType):
    def _update_existing_object(self, obj: Object, new_properties: dict, update_type: ObjectUpdateType,
                                msg: Optional[Message]):
        old_parent_id = obj.ParentID
        new_parent_id = new_properties.get("ParentID", obj.ParentID)
        old_local_id = obj.LocalID

@@ -340,23 +341,23 @@ class ClientWorldObjectManager:
            LOG.warning(f"Tried to move object {obj!r} to unknown region {new_region_handle}")

        if obj.PCode == PCode.AVATAR:
            # `Avatar` instances are handled separately. Update all Avatar objects so
            # we can deal with the RegionHandle change.
            # `Avatar` instances are handled separately. Update all Avatar objects,
            # so we can deal with the RegionHandle change.
            self._rebuild_avatar_objects()
        elif new_parent_id != old_parent_id:
            # Parent ID changed, but we're in the same region
            new_region_state.handle_object_reparented(obj, old_parent_id=old_parent_id)

        if actually_updated_props and new_region_state is not None:
            self._run_object_update_hooks(obj, actually_updated_props, update_type)
            self._run_object_update_hooks(obj, actually_updated_props, update_type, msg)

    def _track_new_object(self, region: RegionObjectsState, obj: Object):
    def _track_new_object(self, region: RegionObjectsState, obj: Object, msg: Message):
        region.track_object(obj)
        self._fullid_lookup[obj.FullID] = obj
        if obj.PCode == PCode.AVATAR:
            self._avatar_objects[obj.FullID] = obj
            self._rebuild_avatar_objects()
        self._run_object_update_hooks(obj, set(obj.to_dict().keys()), ObjectUpdateType.OBJECT_UPDATE)
        self._run_object_update_hooks(obj, set(obj.to_dict().keys()), ObjectUpdateType.OBJECT_UPDATE, msg)

    def _kill_object_by_local_id(self, region_state: RegionObjectsState, local_id: int):
        obj = region_state.lookup_localid(local_id)

@@ -408,11 +409,11 @@ class ClientWorldObjectManager:
            # our view of the world then we want to move it to this region.
            obj = self.lookup_fullid(object_data["FullID"])
            if obj:
                self._update_existing_object(obj, object_data, ObjectUpdateType.OBJECT_UPDATE)
                self._update_existing_object(obj, object_data, ObjectUpdateType.OBJECT_UPDATE, msg)
            else:
                if region_state is None:
                    continue
                self._track_new_object(region_state, Object(**object_data))
                self._track_new_object(region_state, Object(**object_data), msg)
        msg.meta["ObjectUpdateIDs"] = tuple(seen_locals)

    def _handle_terse_object_update(self, msg: Message):

@@ -432,7 +433,7 @@ class ClientWorldObjectManager:
            # Need the Object as context because decoding state requires PCode.
            state_deserializer = ObjectStateSerializer.deserialize
            object_data["State"] = state_deserializer(ctx_obj=obj, val=object_data["State"])
            self._update_existing_object(obj, object_data, ObjectUpdateType.OBJECT_UPDATE)
            self._update_existing_object(obj, object_data, ObjectUpdateType.OBJECT_UPDATE, msg)
        else:
            if region_state:
                region_state.missing_locals.add(object_data["LocalID"])

@@ -460,7 +461,7 @@ class ClientWorldObjectManager:
                self._update_existing_object(obj, {
                    "UpdateFlags": update_flags,
                    "RegionHandle": handle,
                }, ObjectUpdateType.OBJECT_UPDATE)
                }, ObjectUpdateType.OBJECT_UPDATE, msg)
                continue

            cached_obj_data = self._lookup_cache_entry(handle, block["ID"], block["CRC"])

@@ -468,7 +469,7 @@ class ClientWorldObjectManager:
                cached_obj = normalize_object_update_compressed_data(cached_obj_data)
                cached_obj["UpdateFlags"] = update_flags
                cached_obj["RegionHandle"] = handle
                self._track_new_object(region_state, Object(**cached_obj))
                self._track_new_object(region_state, Object(**cached_obj), msg)
                continue

            # Don't know about it and wasn't cached.

@@ -499,11 +500,11 @@ class ClientWorldObjectManager:
                LOG.warning(f"Got ObjectUpdateCompressed for unknown region {handle}: {object_data!r}")
            obj = self.lookup_fullid(object_data["FullID"])
            if obj:
                self._update_existing_object(obj, object_data, ObjectUpdateType.OBJECT_UPDATE)
                self._update_existing_object(obj, object_data, ObjectUpdateType.OBJECT_UPDATE, msg)
            else:
                if region_state is None:
                    continue
                self._track_new_object(region_state, Object(**object_data))
                self._track_new_object(region_state, Object(**object_data), msg)
        msg.meta["ObjectUpdateIDs"] = tuple(seen_locals)

    def _handle_object_properties_generic(self, packet: Message):

@@ -516,7 +517,7 @@ class ClientWorldObjectManager:
            obj = self.lookup_fullid(block["ObjectID"])
            if obj:
                seen_locals.append(obj.LocalID)
                self._update_existing_object(obj, object_properties, ObjectUpdateType.PROPERTIES)
                self._update_existing_object(obj, object_properties, ObjectUpdateType.PROPERTIES, packet)
            else:
                LOG.debug(f"Received {packet.name} for unknown {block['ObjectID']}")
        packet.meta["ObjectUpdateIDs"] = tuple(seen_locals)

@@ -563,11 +564,16 @@ class ClientWorldObjectManager:
                LOG.debug(f"Received ObjectCost for unknown {object_id}")
                continue
            obj.ObjectCosts.update(object_costs)
            self._run_object_update_hooks(obj, {"ObjectCosts"}, ObjectUpdateType.COSTS)
            self._run_object_update_hooks(obj, {"ObjectCosts"}, ObjectUpdateType.COSTS, None)

    def _run_object_update_hooks(self, obj: Object, updated_props: Set[str], update_type: ObjectUpdateType):
    def _run_object_update_hooks(self, obj: Object, updated_props: Set[str], update_type: ObjectUpdateType,
                                 msg: Optional[Message]):
        region_state = self._get_region_state(obj.RegionHandle)
        region_state.resolve_futures(obj, update_type)
        if region_state:
            region_state.resolve_futures(obj, update_type)
        else:
            LOG.warning(f"{obj} not tied to a region state")

        if obj.PCode == PCode.AVATAR and "NameValue" in updated_props:
            if obj.NameValue:
                self.name_cache.update(obj.FullID, obj.NameValue.to_dict())

@@ -1,7 +1,6 @@
from __future__ import annotations

from typing import *

import abc
import copy
import dataclasses

@@ -9,6 +8,8 @@ import multiprocessing
import pickle
import warnings

import outleap

from hippolyzer.lib.base.datatypes import UUID, Vector3
from hippolyzer.lib.base.message.message import Block, Message
from hippolyzer.lib.base.objects import Object

@@ -116,16 +117,14 @@ class MetaBaseAddon(abc.ABCMeta):
    Won't work as you expect!
    """
    def __setattr__(self, key: str, value):
        # TODO: Keep track of AddonProperties in __new__ or something?
        try:
            existing = object.__getattribute__(self, key)
            if existing and isinstance(existing, BaseAddonProperty):
                existing.__set__(self, value)
                return
        except AttributeError:
            # If the attribute doesn't exist then it's fine to use the base setattr.
            super().__setattr__(key, value)
            return
        if existing and isinstance(existing, BaseAddonProperty):
            existing.__set__(self, value)
            return
            pass
        super().__setattr__(key, value)

@@ -173,7 +172,7 @@ class BaseAddon(metaclass=MetaBaseAddon):
        pass

    def handle_object_updated(self, session: Session, region: ProxiedRegion,
                              obj: Object, updated_props: Set[str]):
                              obj: Object, updated_props: Set[str], msg: Optional[Message]):
        pass

    def handle_object_killed(self, session: Session, region: ProxiedRegion, obj: Object):

@@ -196,6 +195,9 @@ class BaseAddon(metaclass=MetaBaseAddon):
                            session: Optional[Session], region: Optional[ProxiedRegion]):
        pass

    async def handle_leap_client_added(self, session_manager: SessionManager, leap_client: outleap.LEAPClient):
        pass


_T = TypeVar("_T")
_U = TypeVar("_U", "Session", "SessionManager")

@@ -209,13 +211,17 @@ class BaseAddonProperty(abc.ABC, Generic[_T, _U]):
    session_manager.addon_ctx dict, without any namespacing. Can be accessed either
    through `AddonClass.property_name` or `addon_instance.property_name`.
    """
    __slots__ = ("name", "default")
    __slots__ = ("name", "default", "_owner")

    def __init__(self, default=dataclasses.MISSING):
        self.default = default
        self._owner = None

    def __set_name__(self, owner, name: str):
        self.name = name
        # Keep track of which addon "owns" this property so that we can shove
        # the data in a bucket specific to that addon name.
        self._owner = owner

    def _make_default(self) -> _T:
        if self.default is not dataclasses.MISSING:

@@ -233,18 +239,20 @@ class BaseAddonProperty(abc.ABC, Generic[_T, _U]):
        if ctx_obj is None:
            raise AttributeError(
                f"{self.__class__} {self.name} accessed outside proper context")
        addon_state = ctx_obj.addon_ctx[self._owner.__name__]
        # Set a default if we have one, otherwise let the KeyError happen.
        # Maybe we should do this at addon initialization instead of on get.
        if self.name not in ctx_obj.addon_ctx:
        if self.name not in addon_state:
            default = self._make_default()
            if default is not dataclasses.MISSING:
                ctx_obj.addon_ctx[self.name] = default
                addon_state[self.name] = default
            else:
                raise AttributeError(f"{self.name} is not set")
        return ctx_obj.addon_ctx[self.name]
        return addon_state[self.name]

    def __set__(self, _obj, value: _T) -> None:
        self._get_context_obj().addon_ctx[self.name] = value
        addon_state = self._get_context_obj().addon_ctx[self._owner.__name__]
        addon_state[self.name] = value


class SessionProperty(BaseAddonProperty[_T, "Session"]):

@@ -15,6 +15,8 @@ import time
from types import ModuleType
from typing import *

import outleap

from hippolyzer.lib.base.datatypes import UUID
from hippolyzer.lib.base.helpers import get_mtime
from hippolyzer.lib.base.message.message import Message

@@ -172,7 +174,10 @@ class AddonManager:
    def load_addon_from_path(cls, path, reload=False, raise_exceptions=True):
        path = pathlib.Path(path).absolute()
        mod_name = "hippolyzer.user_addon_%s" % path.stem
        cls.BASE_ADDON_SPECS.append(importlib.util.spec_from_file_location(mod_name, path))
        spec = importlib.util.spec_from_file_location(mod_name, path)
        if not spec:
            raise ValueError(f"Unable to load {path}")
        cls.BASE_ADDON_SPECS.append(spec)
        addon_dir = os.path.realpath(pathlib.Path(path).parent.absolute())

        if addon_dir not in sys.path:

@@ -199,9 +204,9 @@ class AddonManager:
    @classmethod
    def _check_hotreloads(cls):
        """Mark addons that rely on changed files for reloading"""
        for filename, importers in cls.HOTRELOAD_IMPORTERS.items():
            mtime = get_mtime(filename)
            if not mtime or mtime == cls.FILE_MTIMES.get(filename, None):
        for file_path, importers in cls.HOTRELOAD_IMPORTERS.items():
            mtime = get_mtime(file_path)
            if not mtime or mtime == cls.FILE_MTIMES.get(file_path, None):
                continue

            # Mark anything that imported this as dirty too, handling circular

@@ -220,10 +225,15 @@ class AddonManager:

            _dirty_importers(importers)

            if file_path not in cls.BASE_ADDON_SPECS:
                # Make sure we won't reload importers in a loop if this is actually something
                # that was dynamically imported, where `hot_reload()` might not be called again!
                cls.FILE_MTIMES[file_path] = mtime

    @classmethod
    def hot_reload(cls, mod: Any, require_addons_loaded=False):
        # Solely to trick the type checker because ModuleType doesn't apply where it should
        # and Protocols aren't well supported yet.
        # and Protocols aren't well-supported yet.
        imported_mod: ModuleType = mod
        imported_file = imported_mod.__file__
        # Mark the caller as having imported (and being dependent on) `module`

@@ -338,11 +348,11 @@ class AddonManager:
        cls.SCHEDULER.kill_matching_tasks(lifetime_mask=TaskLifeScope.ADDON, creator=addon)

    @classmethod
    def _call_all_addon_hooks(cls, hook_name, *args, **kwargs):
    def _call_all_addon_hooks(cls, hook_name, *args, call_async=False, **kwargs):
        for module in cls.FRESH_ADDON_MODULES.values():
            if not module:
                continue
            ret = cls._call_module_hooks(module, hook_name, *args, **kwargs)
            ret = cls._call_module_hooks(module, hook_name, *args, call_async=call_async, **kwargs)
            if ret:
                return ret

@@ -373,15 +383,15 @@ class AddonManager:
        return commands

    @classmethod
    def _call_module_hooks(cls, module, hook_name, *args, **kwargs):
    def _call_module_hooks(cls, module, hook_name, *args, call_async=False, **kwargs):
        for addon in cls._get_module_addons(module):
            ret = cls._try_call_hook(addon, hook_name, *args, **kwargs)
            ret = cls._try_call_hook(addon, hook_name, *args, call_async=call_async, **kwargs)
            if ret:
                return ret
        return cls._try_call_hook(module, hook_name, *args, **kwargs)
        return cls._try_call_hook(module, hook_name, *args, call_async=call_async, **kwargs)

    @classmethod
    def _try_call_hook(cls, addon, hook_name, *args, **kwargs):
    def _try_call_hook(cls, addon, hook_name, *args, call_async=False, **kwargs):
        if cls._SUBPROCESS:
            return

@@ -391,6 +401,20 @@ class AddonManager:
        if not hook_func:
            return
        try:
            if call_async:
                old_hook_func = hook_func

                # Wrapper so we can invoke an async hook synchronously.
                def _wrapper(*w_args, **w_kwargs):
                    cls.SCHEDULER.schedule_task(
                        old_hook_func(*w_args, **w_kwargs),
                        scope=TaskLifeScope.ADDON,
                        creator=addon,
                    )
                    # Fall through to any other handlers as well,
                    # async handlers don't chain.
                    return None
                hook_func = _wrapper
            return hook_func(*args, **kwargs)
        except:
            logging.exception("Exploded in %r's %s hook" % (addon, hook_name))

@@ -540,9 +564,9 @@ class AddonManager:

    @classmethod
    def handle_object_updated(cls, session: Session, region: ProxiedRegion,
                              obj: Object, updated_props: Set[str]):
                              obj: Object, updated_props: Set[str], msg: Optional[Message]):
        with addon_ctx.push(session, region):
            return cls._call_all_addon_hooks("handle_object_updated", session, region, obj, updated_props)
            return cls._call_all_addon_hooks("handle_object_updated", session, region, obj, updated_props, msg)

    @classmethod
    def handle_object_killed(cls, session: Session, region: ProxiedRegion, obj: Object):

@@ -572,3 +596,7 @@ class AddonManager:
        with addon_ctx.push(session, region):
            return cls._call_all_addon_hooks("handle_proxied_packet", session_manager,
                                             packet, session, region)

    @classmethod
    def handle_leap_client_added(cls, session_manager: SessionManager, leap_client: outleap.LEAPClient):
        return cls._call_all_addon_hooks("handle_leap_client_added", session_manager, leap_client, call_async=True)

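For context, a sketch of an addon written against the new hook signature above. The hook signature and most import paths are taken from this diff; the `ProxiedRegion` import path and the `UpdateSourceLogger` name are assumptions for illustration:

from typing import Optional, Set

from hippolyzer.lib.base.message.message import Message
from hippolyzer.lib.base.objects import Object
from hippolyzer.lib.proxy.addon_utils import BaseAddon
from hippolyzer.lib.proxy.region import ProxiedRegion
from hippolyzer.lib.proxy.sessions import Session


class UpdateSourceLogger(BaseAddon):
    def handle_object_updated(self, session: Session, region: ProxiedRegion,
                              obj: Object, updated_props: Set[str], msg: Optional[Message]):
        # `msg` is None for updates not triggered by a message (e.g. ObjectCosts)
        if msg is not None:
            print(f"{obj.FullID} updated by {msg.name}: {updated_props}")
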
@@ -42,7 +42,7 @@ class MITMProxyEventManager:
        "UpdateNotecardAgentInventory", "UpdateNotecardTaskInventory",
        "UpdateScriptAgent", "UpdateScriptTask",
        "UpdateSettingsAgentInventory", "UpdateSettingsTaskInventory",
        "UploadBakedTexture",
        "UploadBakedTexture", "UploadAgentProfileImage",
    }

    def __init__(self, session_manager: SessionManager, flow_context: HTTPFlowContext):

@@ -1,4 +1,5 @@
import datetime as dt
import logging

from hippolyzer.lib.base.helpers import get_mtime
from hippolyzer.lib.client.inventory_manager import InventoryManager

@@ -25,4 +26,7 @@ class ProxyInventoryManager(InventoryManager):
                newest_cache = inv_cache_path

        if newest_cache:
            self.load_cache(newest_cache)
            try:
                self.load_cache(newest_cache)
            except:
                logging.exception("Failed to load invcache")

@@ -133,8 +133,9 @@ class ProxyWorldObjectManager(ClientWorldObjectManager):
            region_mgr.queued_cache_misses |= missing_locals
            region_mgr.request_missed_cached_objects_soon()

    def _run_object_update_hooks(self, obj: Object, updated_props: Set[str], update_type: ObjectUpdateType):
        super()._run_object_update_hooks(obj, updated_props, update_type)
    def _run_object_update_hooks(self, obj: Object, updated_props: Set[str], update_type: ObjectUpdateType,
                                 msg: Optional[Message]):
        super()._run_object_update_hooks(obj, updated_props, update_type, msg)
        region = self._session.region_by_handle(obj.RegionHandle)
        if self._settings.ALLOW_AUTO_REQUEST_OBJECTS:
            if obj.PCode == PCode.AVATAR and "ParentID" in updated_props:

@@ -145,7 +146,7 @@ class ProxyWorldObjectManager(ClientWorldObjectManager):
            # have no way to get a sitting agent's true region location, even if it's ourselves.
            region.objects.queued_cache_misses.add(obj.ParentID)
            region.objects.request_missed_cached_objects_soon()
        AddonManager.handle_object_updated(self._session, region, obj, updated_props)
        AddonManager.handle_object_updated(self._session, region, obj, updated_props, msg)

    def _run_kill_object_hooks(self, obj: Object):
        super()._run_kill_object_hooks(obj)

@@ -189,7 +189,7 @@ class EventQueueManager:
        # over the EQ. That will allow us to shove our own event onto the response once it comes in,
        # otherwise we have to wait until the EQ legitimately returns 200 due to a new event.
        # May or may not work in OpenSim.
        circuit.send_message(Message(
        circuit.send(Message(
            'PlacesQuery',
            Block('AgentData', AgentID=session.agent_id, SessionID=session.id, QueryID=UUID()),
            Block('TransactionData', TransactionID=UUID()),

@@ -1,5 +1,6 @@
from __future__ import annotations

import collections
import dataclasses
import datetime
import functools

@@ -9,6 +10,8 @@ import weakref
from typing import *
from weakref import ref

from outleap import LEAPClient

from hippolyzer.lib.base.datatypes import UUID
from hippolyzer.lib.base.helpers import proxify
from hippolyzer.lib.base.message.message import Message

@@ -41,7 +44,8 @@ class Session(BaseClientSession):
        self.circuit_code = circuit_code
        self.global_caps = {}
        # Bag of arbitrary data addons can use to persist data across addon reloads
        self.addon_ctx = {}
        # Each addon name gets its own separate dict within this dict
        self.addon_ctx: Dict[str, Dict[str, Any]] = collections.defaultdict(dict)
        self.session_manager: SessionManager = session_manager or None
        self.selected: SelectionModel = SelectionModel()
        self.regions: List[ProxiedRegion] = []

@@ -50,6 +54,7 @@ class Session(BaseClientSession):
        self.http_message_handler: MessageHandler[HippoHTTPFlow, str] = MessageHandler()
        self.objects = ProxyWorldObjectManager(self, session_manager.settings, session_manager.name_cache)
        self.inventory = ProxyInventoryManager(proxify(self))
        self.leap_client: Optional[LEAPClient] = None
        # Base path of a newview type cache directory for this session
        self.cache_dir: Optional[str] = None
        self._main_region = None

@@ -185,8 +190,9 @@ class SessionManager:
        self.flow_context = HTTPFlowContext()
        self.asset_repo = HTTPAssetRepo()
        self.message_logger: Optional[BaseMessageLogger] = None
        self.addon_ctx: Dict[str, Any] = {}
        self.addon_ctx: Dict[str, Dict[str, Any]] = collections.defaultdict(dict)
        self.name_cache = ProxyNameCache()
        self.pending_leap_clients: List[LEAPClient] = []

    def create_session(self, login_data) -> Session:
        session = Session.from_login_data(login_data, self)

@@ -195,6 +201,15 @@ class SessionManager:
            session.http_message_handler,
        )
        self.sessions.append(session)
        # TODO: less crap way of tying a LEAP client to a session
        while self.pending_leap_clients:
            leap_client = self.pending_leap_clients.pop(-1)
            # Client may have gone bad since it connected
            if not leap_client.connected:
                continue
            logging.info("Assigned LEAP client to session")
            session.leap_client = leap_client
            break
        logging.info("Created %r" % session)
        return session

@@ -209,6 +224,8 @@ class SessionManager:
    def close_session(self, session: Session):
        logging.info("Closed %r" % session)
        session.objects.clear()
        if session.leap_client:
            session.leap_client.disconnect()
        self.sessions.remove(session)

    def resolve_cap(self, url: str) -> Optional["CapData"]:

@@ -218,6 +235,10 @@ class SessionManager:
            return cap_data
        return CapData()

    async def leap_client_connected(self, leap_client: LEAPClient):
        self.pending_leap_clients.append(leap_client)
        AddonManager.handle_leap_client_added(self, leap_client)


@dataclasses.dataclass
class SelectionModel:

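A quick illustration of the addon_ctx namespacing change above, a sketch only; the per-addon-name keying is what the updated integration tests at the bottom of this diff exercise:

# Before: all addons shared one flat dict
# session.addon_ctx["counter"] = 0

# After: each addon reads and writes its own bucket, keyed by addon name
ctx = session.addon_ctx[__name__]
ctx["counter"] = ctx.get("counter", 0) + 1
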
@@ -25,6 +25,7 @@ class EnvSettingDescriptor(SettingDescriptor):
class ProxySettings(Settings):
    SOCKS_PROXY_PORT: int = EnvSettingDescriptor(9061, "HIPPO_UDP_PORT", int)
    HTTP_PROXY_PORT: int = EnvSettingDescriptor(9062, "HIPPO_HTTP_PORT", int)
    LEAP_PORT: int = EnvSettingDescriptor(9063, "HIPPO_LEAP_PORT", int)
    PROXY_BIND_ADDR: str = EnvSettingDescriptor("127.0.0.1", "HIPPO_BIND_HOST", str)
    REMOTELY_ACCESSIBLE: bool = SettingDescriptor(False)
    USE_VIEWER_OBJECT_CACHE: bool = SettingDescriptor(False)

@@ -108,4 +108,7 @@ CAP_TEMPLATES: List[CAPTemplate] = [
    CAPTemplate(cap_name='ViewerBenefits', method='GET', body=b'', query=set(), path=''),
    CAPTemplate(cap_name='SetDisplayName', method='POST', body=b'<?xml version="1.0" ?>\n<llsd>\n<map>\n <key>display_name</key>\n <array>\n <string>OLD_DISPLAY_NAME</string>\n <string>NEW_DISPLAY_NAME</string>\n </array>\n </map>\n</llsd>\n', query=set(), path=''),
    CAPTemplate(cap_name='ObjectMediaNavigate', method='POST', body=b'<?xml version="1.0" ?>\n<llsd>\n<map>\n <key>current_url</key>\n <string></string>\n <key>object_id</key>\n <uuid><!HIPPOREPL[[SELECTED_FULL]]></uuid>\n <key>texture_index</key>\n <integer></integer>\n </map>\n</llsd>\n', query=set(), path=''),
    CAPTemplate(cap_name='AgentProfile', method='GET', body=b'', query=set(), path='/<SOME_ID>'),
    CAPTemplate(cap_name='InterestList', method='POST', body=b'<?xml version="1.0" ?>\n<llsd>\n<map>\n <key>mode</key>\n <string>360</string>\n </map>\n</llsd>', query=set(), path='/'),
    CAPTemplate(cap_name='RegionObjects', method='GET', body=b'', query=set(), path=''),
]

@@ -139,7 +139,7 @@ class ViewerObjectCache:
        return RegionViewerObjectCache.from_file(objects_file)


class ViewerObjectCacheEntry(recordclass.datatuple):  # type: ignore
class ViewerObjectCacheEntry(recordclass.dataobject):  # type: ignore
    local_id: int
    crc: int
    data: bytes

@@ -1,4 +1,4 @@
aiohttp==3.8.1
aiohttp==3.8.3
aiosignal==1.2.0
appdirs==1.4.4
Arpeggio==1.10.2

@@ -7,14 +7,15 @@ async-timeout==4.0.1
attrs==21.2.0
blinker==1.4
Brotli==1.0.9
certifi==2021.10.8
certifi==2022.12.7
cffi==1.15.0
charset-normalizer==2.0.9
click==8.0.3
cryptography==36.0.2
defusedxml==0.7.1
Flask==2.0.2
frozenlist==1.2.0
frozenlist==1.3.3
gltflib==1.0.13
Glymur==0.9.6
h11==0.12.0
h2==4.1.0

@@ -27,13 +28,14 @@ Jinja2==3.0.3
kaitaistruct==0.9
lazy-object-proxy==1.6.0
ldap3==2.9.1
llbase==1.2.11
lxml==4.6.4
llsd~=1.0.0
lxml==4.9.2
MarkupSafe==2.0.1
mitmproxy==8.0.0
msgpack==1.0.3
multidict==5.2.0
numpy==1.21.4
numpy==1.24.2
outleap~=0.4.1
parso==0.8.3
passlib==1.7.4
prompt-toolkit==3.0.23

@@ -47,13 +49,13 @@ Pygments==2.10.0
pyOpenSSL==22.0.0
pyparsing==2.4.7
pyperclip==1.8.2
PySide6==6.2.2
PySide6-Essentials==6.4.2
qasync==0.22.0
recordclass==0.14.3
recordclass==0.18.2
requests==2.26.0
ruamel.yaml==0.17.16
ruamel.yaml.clib==0.2.6
shiboken6==6.2.2
ruamel.yaml==0.17.21
ruamel.yaml.clib==0.2.7
shiboken6==6.4.2
six==1.16.0
sortedcontainers==2.4.0
tornado==6.1

@@ -64,5 +66,5 @@ urwid==2.1.2
wcwidth==0.2.5
Werkzeug==2.0.2
wsproto==1.0.0
yarl==1.7.2
zstandard==0.15.2
yarl==1.8.2
zstandard<0.18.0

14
setup.py
@@ -25,7 +25,7 @@ from setuptools import setup, find_packages

here = path.abspath(path.dirname(__file__))

version = '0.12.0'
version = '0.13.3'

with open(path.join(here, 'README.md')) as readme_fh:
    readme = readme_fh.read()

@@ -45,6 +45,7 @@ setup(
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: Implementation :: CPython",
        "Topic :: System :: Networking :: Monitoring",
        "Topic :: Software Development :: Libraries :: Python Modules",

@@ -75,16 +76,18 @@ setup(
    entry_points={
        'console_scripts': {
            'hippolyzer-gui = hippolyzer.apps.proxy_gui:gui_main',
            'hippolyzer-cli = hippolyzer.apps.proxy:main'
            'hippolyzer-cli = hippolyzer.apps.proxy:main',
        }
    },
    zip_safe=False,
    python_requires='>=3.8',
    install_requires=[
        'llbase>=1.2.5',
        'llsd<1.1.0',
        'outleap<1.0',
        'defusedxml',
        'aiohttp<4.0.0',
        'recordclass<0.15',
        # Newer recordclasses break!
        'recordclass>0.15,<0.18.3',
        'lazy-object-proxy',
        'arpeggio',
        # requests breaks with newer idna

@@ -97,11 +100,12 @@ setup(
        'Glymur<0.9.7',
        'numpy<2.0',
        # These could be in extras_require if you don't want a GUI.
        'pyside6',
        'pyside6-essentials',
        'qasync',
        # Needed for mesh format conversion tooling
        'pycollada',
        'transformations',
        'gltflib',
    ],
    tests_require=[
        "pytest",

@@ -3,6 +3,7 @@ import setuptools  # noqa
import os
import shutil
from distutils.core import Command
from importlib.metadata import version
from pathlib import Path

from cx_Freeze import setup, Executable

@@ -113,7 +114,7 @@ executables = [

setup(
    name="hippolyzer_gui",
    version="0.9.0",
    version=version("hippolyzer"),
    description="Hippolyzer GUI",
    options=options,
    executables=executables,

@@ -40,6 +40,8 @@ class TestMesh(unittest.TestCase):
        writer.write(serializer, reader.read(serializer))
        second_buf = writer.copy_buffer()
        self.assertEqual(first_buf, second_buf)
        # Dates may not round-trip correctly, but length should always be the same
        self.assertEqual(len(first_buf), len(self.slm_bytes))

    def test_serialize_raw_segments(self):
        serializer = LLMeshSerializer(include_raw_segments=True)

@@ -89,7 +89,7 @@ class _MutableMultiDictTests:
        d = create_instance()
        s = pickle.dumps(d, protocol)
        ud = pickle.loads(s)
        assert type(ud) == type(d)
        assert type(ud) is type(d)
        assert ud == d
        alternative = pickle.dumps(create_instance("werkzeug"), protocol)
        assert pickle.loads(alternative) == d

@@ -6,6 +6,8 @@ import uuid
 from io import BytesIO
 from typing import Optional

+import numpy as np
+
 from hippolyzer.lib.base.datatypes import *
 import hippolyzer.lib.base.serialization as se
 from hippolyzer.lib.base.llanim import Animation, Joint, RotKeyframe
@@ -693,6 +695,46 @@ class NameValueSerializationTests(BaseSerializationTest):
         deser.to_dict()


+class NumPySerializationTests(BaseSerializationTest):
+    def setUp(self) -> None:
+        super().setUp()
+        self.writer.endianness = "<"
+
+    def test_simple(self):
+        quant_spec = se.Vector3U16(0.0, 1.0)
+        self.writer.write(quant_spec, Vector3(0, 0.1, 0))
+        self.writer.write(quant_spec, Vector3(1, 1, 1))
+
+        reader = self._get_reader()
+        np_spec = se.NumPyArray(se.BytesGreedy(), np.dtype(np.uint16), 3)
+        np_val = reader.read(np_spec)
+        expected_arr = np.array([[0, 6554, 0], [0xFFFF, 0xFFFF, 0xFFFF]], dtype=np.uint16)
+        np.testing.assert_array_equal(expected_arr, np_val)
+
+        # Make sure writing the array back works correctly
+        orig_buf = self.writer.copy_buffer()
+        self.writer.clear()
+        self.writer.write(np_spec, expected_arr)
+        self.assertEqual(orig_buf, self.writer.copy_buffer())
+
+    def test_quantization(self):
+        quant_spec = se.Vector3U16(0.0, 1.0)
+        self.writer.write(quant_spec, Vector3(0, 0.1, 0))
+        self.writer.write(quant_spec, Vector3(1, 1, 1))
+
+        reader = self._get_reader()
+        np_spec = se.QuantizedNumPyArray(se.NumPyArray(se.BytesGreedy(), np.dtype(np.uint16), 3), 0.0, 1.0)
+        np_val = reader.read(np_spec)
+        expected_arr = np.array([[0, 0.1, 0], [1, 1, 1]], dtype=np.float64)
+        np.testing.assert_array_almost_equal(expected_arr, np_val, decimal=5)
+
+        # Make sure writing the array back works correctly
+        orig_buf = self.writer.copy_buffer()
+        self.writer.clear()
+        self.writer.write(np_spec, expected_arr)
+        self.assertEqual(orig_buf, self.writer.copy_buffer())
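The expected values in these tests fall out of plain U16 quantization over the [lower, upper] = [0.0, 1.0] range: each component is scaled to 0..65535 and rounded, so 0.1 becomes 6554, and dequantizing only gets back to within ~1e-5 of the original, which is why assert_array_almost_equal(..., decimal=5) is the right tolerance. A back-of-the-envelope check (the exact rounding mode is an assumption):

    lower, upper = 0.0, 1.0
    value = 0.1

    # Quantize a float in [lower, upper] to an unsigned 16-bit integer
    quantized = round((value - lower) / (upper - lower) * 0xFFFF)
    assert quantized == 6554  # matches expected_arr in test_simple

    # Dequantize back to a float; the round-trip error stays below 1e-5
    dequantized = lower + quantized / 0xFFFF * (upper - lower)
    assert abs(dequantized - value) < 1e-5  # hence decimal=5 in test_quantization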
+
+
 class AnimSerializationTests(BaseSerializationTest):
     SIMPLE_ANIM = b'\x01\x00\x00\x00\x01\x00\x00\x00H\x11\xd1?\x00\x00\x00\x00\x00H\x11\xd1?\x00\x00\x00\x00' \
                   b'\xcd\xccL>\x9a\x99\x99>\x01\x00\x00\x00\x02\x00\x00\x00mNeck\x00\x01\x00\x00\x00\x03\x00' \
32
tests/base/test_skeleton.py
Normal file
@@ -0,0 +1,32 @@
+import unittest
+
+import numpy as np
+
+from hippolyzer.lib.base.mesh_skeleton import load_avatar_skeleton
+
+
+class TestSkeleton(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls) -> None:
+        cls.skeleton = load_avatar_skeleton()
+
+    def test_get_joint(self):
+        node = self.skeleton["mNeck"]
+        self.assertEqual("mNeck", node.name)
+        self.assertEqual(self.skeleton, node.skeleton())
+
+    def test_get_joint_index(self):
+        self.assertEqual(7, self.skeleton["mNeck"].index)
+        self.assertEqual(113, self.skeleton["mKneeLeft"].index)
+
+    def test_get_joint_parent(self):
+        self.assertEqual("mChest", self.skeleton["mNeck"].parent().name)
+
+    def test_get_joint_matrix(self):
+        expected_mat = np.array([
+            [1., 0., 0., -0.01],
+            [0., 1., 0., 0.],
+            [0., 0., 1., 0.251],
+            [0., 0., 0., 1.]
+        ])
+        np.testing.assert_equal(expected_mat, self.skeleton["mNeck"].matrix)
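The joint matrix in the new test is a standard 4x4 homogeneous transform with the joint's local translation in the last column ((-0.01, 0, 0.251) for mNeck relative to mChest). Chaining those matrices up the parent() chain gives a joint's rest position in avatar space. A rough sketch using only the API surface the tests exercise; the None-at-root convention is an assumption:

    import numpy as np

    from hippolyzer.lib.base.mesh_skeleton import load_avatar_skeleton


    def rest_world_matrix(joint):
        """Compose local transforms from the root down to `joint`."""
        mat = joint.matrix
        parent = joint.parent()
        while parent is not None:  # assumes parent() returns None at the root
            mat = parent.matrix @ mat
            parent = parent.parent()
        return mat


    skeleton = load_avatar_skeleton()
    # The translation column of the composed matrix is the rest position
    neck_rest_pos = rest_world_matrix(skeleton["mNeck"])[:3, 3]
    print(neck_rest_pos)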
@@ -88,12 +88,12 @@ class AddonIntegrationTests(BaseProxyTest):
         self._setup_default_circuit()
         self._fake_command("foobar baz")
         await self._wait_drained()
-        self.assertEqual(self.session.addon_ctx["bazquux"], "baz")
+        self.assertEqual(self.session.addon_ctx["MockAddon"]["bazquux"], "baz")

         # In session context these should be equivalent
         with addon_ctx.push(new_session=self.session):
-            self.assertEqual(self.session.addon_ctx["bazquux"], self.addon.bazquux)
-            self.assertEqual(self.session.addon_ctx["another"], "baz")
+            self.assertEqual(self.session.addon_ctx["MockAddon"]["bazquux"], self.addon.bazquux)
+            self.assertEqual(self.session.addon_ctx["MockAddon"]["another"], "baz")

         # Outside session context it should raise
         with self.assertRaises(AttributeError):
@@ -104,7 +104,7 @@ class AddonIntegrationTests(BaseProxyTest):

         self.session.addon_ctx.clear()
         with addon_ctx.push(new_session=self.session):
-            # This has no default so should fail
+            # This has no default so it should fail
             with self.assertRaises(AttributeError):
                 _something = self.addon.bazquux
             # This has a default
@@ -144,9 +144,9 @@ class AddonIntegrationTests(BaseProxyTest):
         AddonManager.load_addon_from_path(str(self.parent_path), reload=True)
         # Wait for the init hooks to run
         await asyncio.sleep(0.001)
-        self.assertFalse("quux" in self.session_manager.addon_ctx)
+        self.assertFalse("quux" in self.session_manager.addon_ctx["ParentAddon"])
         parent_addon_mod = AddonManager.FRESH_ADDON_MODULES['hippolyzer.user_addon_parent_addon']
        self.assertEqual(0, parent_addon_mod.ParentAddon.quux)
-        self.assertEqual(0, self.session_manager.addon_ctx["quux"])
+        self.assertEqual(0, self.session_manager.addon_ctx["ParentAddon"]["quux"])
         parent_addon_mod.ParentAddon.quux = 1
-        self.assertEqual(1, self.session_manager.addon_ctx["quux"])
+        self.assertEqual(1, self.session_manager.addon_ctx["ParentAddon"]["quux"])
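The visible change in these assertions is that addon_ctx is now a two-level mapping, keyed first by a per-addon namespace (the addon class name here), so two addons that both store a "quux" key no longer clobber each other. A hedged sketch of the layout the tests rely on; the exact container types and the second addon name are assumptions:

    # Before: one flat dict shared by every addon
    # session.addon_ctx["quux"] = 0

    # After: each addon reads and writes its own namespace
    session.addon_ctx["ParentAddon"]["quux"] = 0
    session.addon_ctx["SomeOtherAddon"]["quux"] = "unrelated"  # hypothetical addon, no collision
    assert session.addon_ctx["ParentAddon"]["quux"] == 0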
@@ -36,7 +36,7 @@ class MockAddon(BaseAddon):
         return True

     def handle_object_updated(self, session: Session, region: ProxiedRegion,
-                              obj: Object, updated_props: Set[str]):
+                              obj: Object, updated_props: Set[str], msg: Optional[Message]):
         self.events.append(("object_update", session.id, region.circuit_addr, obj.LocalID, updated_props))


@@ -48,7 +48,7 @@ class ObjectTrackingAddon(BaseAddon):
         super().__init__()
         self.events = []

-    def handle_object_updated(self, session, region, obj: Object, updated_props: Set[str]):
+    def handle_object_updated(self, session, region, obj: Object, updated_props: Set[str], msg: Optional[Message]):
         self.events.append(("update", obj, updated_props))

     def handle_object_killed(self, session, region, obj: Object):
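Any third-party addon that overrides handle_object_updated needs the same signature change: the hook now also receives the Message that triggered the update, or None when the update didn't come from a message. A minimal sketch of a conforming addon; the import paths for Message, Object, and ProxiedRegion are assumed from the library layout, the addon class is hypothetical, and the body is purely illustrative:

    from typing import Optional, Set

    from hippolyzer.lib.base.message.message import Message  # path assumed
    from hippolyzer.lib.base.objects import Object  # path assumed
    from hippolyzer.lib.proxy.addon_utils import BaseAddon
    from hippolyzer.lib.proxy.region import ProxiedRegion  # path assumed
    from hippolyzer.lib.proxy.sessions import Session


    class PositionLoggerAddon(BaseAddon):  # hypothetical example addon
        def handle_object_updated(self, session: Session, region: ProxiedRegion,
                                  obj: Object, updated_props: Set[str], msg: Optional[Message]):
            # msg may be None, e.g. for updates not driven by a specific message
            if "Position" in updated_props:
                source = msg.name if msg is not None else "<no message>"
                print(f"Object {obj.LocalID} moved (source: {source})")


    addons = [PositionLoggerAddon()]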