# coding: utf-8
from __future__ import print_function, unicode_literals

import base64
import math
import os
import re
import socket
import sys
import threading
import time
import queue

from .__init__ import ANYWIN, CORES, EXE, MACOS, TYPE_CHECKING, EnvParams

try:
    MNFE = ModuleNotFoundError
except:
    MNFE = ImportError

try:
    import jinja2
except MNFE:
    if EXE:
        raise

    print(
        """\033[1;31m
  you do not have jinja2 installed,\033[33m
  choose one of these:\033[0m
   * apt install python-jinja2
   * {} -m pip install --user jinja2
   * (try another python version, if you have one)
   * (try copyparty.sfx instead)
""".format(
            sys.executable
        )
    )
    sys.exit(1)
except SyntaxError:
    if EXE:
        raise

    print(
        """\033[1;31m
  your jinja2 version is incompatible with your python version;\033[33m
  please try to replace it with an older version:\033[0m
   * {} -m pip install --user jinja2==2.11.3
   * (try another python version, if you have one)
   * (try copyparty.sfx instead)
""".format(
            sys.executable
        )
    )
    sys.exit(1)

from .httpconn import HttpConn
from .metrics import Metrics
from .u2idx import U2idx
from .util import (
    E_SCK,
    FHC,
    Daemon,
    Garda,
    Magician,
    Netdev,
    NetMap,
    absreal,
    build_netmap,
    ipnorm,
    min_ex,
    shut_socket,
    spack,
    start_log_thrs,
    start_stackmon,
)

if TYPE_CHECKING:
    from .broker_util import BrokerCli
    from .ssdp import SSDPr

if True:  # pylint: disable=using-constant-test
    from typing import Any, Optional


class HttpSrv(object):
    """
    handles incoming connections using HttpConn to process http,
    relying on MpSrv for performance (HttpSrv is just plain threads)
    """

    def __init__(self, broker: "BrokerCli", nid: Optional[int]) -> None:
        self.broker = broker
        self.nid = nid
        self.args = broker.args
        self.E: EnvParams = self.args.E
        self.log = broker.log
        self.asrv = broker.asrv

        # redefine in case of multiprocessing
        socket.setdefaulttimeout(120)

        self.t0 = time.time()

        nsuf = "-n{}-i{:x}".format(nid, os.getpid()) if nid else ""
        self.magician = Magician()
        self.nm = NetMap([], {})
        self.ssdp: Optional["SSDPr"] = None
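        # abuse trackers; each Garda counts one class of failures per client IP
        # (bad passwords, 404s, 403s, 422s, malformed requests, suspicious URLs)
        # so repeat offenders can be temporarily banned (see self.bans below)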
        self.gpwd = Garda(self.args.ban_pw)
        self.g404 = Garda(self.args.ban_404)
        self.g403 = Garda(self.args.ban_403)
        self.g422 = Garda(self.args.ban_422, False)
        self.gmal = Garda(self.args.ban_422)
        self.gurl = Garda(self.args.ban_url)
        self.bans: dict[str, int] = {}
        self.aclose: dict[str, int] = {}

        self.bound: set[tuple[str, int]] = set()
        self.name = "hsrv" + nsuf
        self.mutex = threading.Lock()
        self.u2mutex = threading.Lock()
        self.stopping = False

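        # a lazily-grown pool of worker threads services client sockets;
        # tp_q is the work queue (None when the pool is disabled, see args.no_htp)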
        self.tp_nthr = 0  # actual
        self.tp_ncli = 0  # fading
        self.tp_time = 0.0  # latest worker collect
        self.tp_q: Optional[queue.LifoQueue[Any]] = (
            None if self.args.no_htp else queue.LifoQueue()
        )
        self.t_periodic: Optional[threading.Thread] = None

        self.u2fh = FHC()
        self.metrics = Metrics(self)
        self.nreq = 0
        self.nsus = 0
        self.nban = 0
        self.srvs: list[socket.socket] = []
        self.ncli = 0  # exact
        self.clients: set[HttpConn] = set()  # laggy
        self.nclimax = 0
        self.cb_ts = 0.0
        self.cb_v = ""

        self.u2idx_free: dict[str, U2idx] = {}
        self.u2idx_n = 0

        env = jinja2.Environment()
        env.loader = jinja2.FileSystemLoader(os.path.join(self.E.mod, "web"))
        jn = ["splash", "svcs", "browser", "browser2", "msg", "md", "mde", "cf"]
        self.j2 = {x: env.get_template(x + ".html") for x in jn}
        zs = os.path.join(self.E.mod, "web", "deps", "prism.js.gz")
        self.prism = os.path.exists(zs)

        self.ipa_nm = build_netmap(self.args.ipa)
        self.xff_nm = build_netmap(self.args.xff_src)
        self.xff_lan = build_netmap("lan")

        self.statics: set[str] = set()
        self._build_statics()

        self.ptn_cc = re.compile(r"[\x00-\x1f]")
        self.ptn_hsafe = re.compile(r"[\x00-\x1f<>\"'&]")

        self.mallow = "GET HEAD POST PUT DELETE OPTIONS".split()
        if not self.args.no_dav:
            zs = "PROPFIND PROPPATCH LOCK UNLOCK MKCOL COPY MOVE"
            self.mallow += zs.split()

        if self.args.zs:
            from .ssdp import SSDPr

            self.ssdp = SSDPr(broker)

        if self.tp_q:
            self.start_threads(4)

        if nid:
            if self.args.stackmon:
                start_stackmon(self.args.stackmon, nid)

            if self.args.log_thrs:
                start_log_thrs(self.log, self.args.log_thrs, nid)

        self.th_cfg: dict[str, set[str]] = {}
        Daemon(self.post_init, "hsrv-init2")

    def post_init(self) -> None:
        try:
            x = self.broker.ask("thumbsrv.getcfg")
            self.th_cfg = x.get()
        except:
            pass

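    # index every file under web/ by absolute path; .gz assets are also
    # registered under their uncompressed name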
    def _build_statics(self) -> None:
        for dp, _, df in os.walk(os.path.join(self.E.mod, "web")):
            for fn in df:
                ap = absreal(os.path.join(dp, fn))
                self.statics.add(ap)
                if ap.endswith(".gz"):
                    self.statics.add(ap[:-3])

    def set_netdevs(self, netdevs: dict[str, Netdev]) -> None:
        ips = set()
        for ip, _ in self.bound:
            ips.add(ip)

        self.nm = NetMap(list(ips), list(netdevs))

    def start_threads(self, n: int) -> None:
        self.tp_nthr += n
        if self.args.log_htp:
            self.log(self.name, "workers += {} = {}".format(n, self.tp_nthr), 6)

        for _ in range(n):
            Daemon(self.thr_poolw, self.name + "-poolw")

    def stop_threads(self, n: int) -> None:
        self.tp_nthr -= n
        if self.args.log_htp:
            self.log(self.name, "workers -= {} = {}".format(n, self.tp_nthr), 6)

        assert self.tp_q
        for _ in range(n):
            self.tp_q.put(None)

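    # housekeeping: flush idle upload filehandles, shrink the threadpool as
    # clients disappear, and terminate itself once everything is idle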
    def periodic(self) -> None:
        while True:
            time.sleep(2 if self.tp_ncli or self.ncli else 10)
            with self.u2mutex, self.mutex:
                self.u2fh.clean()
                if self.tp_q:
                    self.tp_ncli = max(self.ncli, self.tp_ncli - 2)
                    if self.tp_nthr > self.tp_ncli + 8:
                        self.stop_threads(4)

                if not self.ncli and not self.u2fh.cache and self.tp_nthr <= 8:
                    self.t_periodic = None
                    return

    def listen(self, sck: socket.socket, nlisteners: int) -> None:
        if self.args.j != 1:
            # lost in the pickle; redefine
            if not ANYWIN or self.args.reuseaddr:
                sck.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

            sck.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            sck.settimeout(None)  # < does not inherit, ^ opts above do

        ip, port = sck.getsockname()[:2]
        self.srvs.append(sck)
        self.bound.add((ip, port))
        self.nclimax = math.ceil(self.args.nc * 1.0 / nlisteners)
        Daemon(
            self.thr_listen,
            "httpsrv-n{}-listen-{}-{}".format(self.nid or "0", ip, port),
            (sck,),
        )

    def thr_listen(self, srv_sck: socket.socket) -> None:
        """listens on a shared tcp server"""
        ip, port = srv_sck.getsockname()[:2]
        fno = srv_sck.fileno()
        hip = "[{}]".format(ip) if ":" in ip else ip
        msg = "subscribed @ {}:{} f{} p{}".format(hip, port, fno, os.getpid())
        self.log(self.name, msg)

        def fun() -> None:
            self.broker.say("cb_httpsrv_up")

        threading.Thread(target=fun, name="sig-hsrv-up1").start()

        while not self.stopping:
            if self.args.log_conn:
                self.log(self.name, "|%sC-ncli" % ("-" * 1,), c="90")

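            # at the connection limit: wait for a slot; if still saturated
            # after ~5s and --aclose is set, force connection:close on the
            # busiest client IP, and ban it as a slowloris if most of its
            # connections are idle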
            spins = 0
            while self.ncli >= self.nclimax:
                if not spins:
                    self.log(self.name, "at connection limit; waiting", 3)

                spins += 1
                time.sleep(0.1)
                if spins != 50 or not self.args.aclose:
                    continue

                ipfreq: dict[str, int] = {}
                with self.mutex:
                    for c in self.clients:
                        ip = ipnorm(c.ip)
                        try:
                            ipfreq[ip] += 1
                        except:
                            ipfreq[ip] = 1

                ip, n = sorted(ipfreq.items(), key=lambda x: x[1], reverse=True)[0]
                if n < self.nclimax / 2:
                    continue

                self.aclose[ip] = int(time.time() + self.args.aclose * 60)

                nclose = 0
                nloris = 0
                nconn = 0
                with self.mutex:
                    for c in self.clients:
                        cip = ipnorm(c.ip)
                        if ip != cip:
                            continue

                        nconn += 1
                        try:
                            if (
                                c.nreq >= 1
                                or not c.cli
                                or c.cli.in_hdr_recv
                                or c.cli.keepalive
                            ):
                                Daemon(c.shutdown)
                                nclose += 1
                                if c.nreq <= 0 and (not c.cli or c.cli.in_hdr_recv):
                                    nloris += 1
                        except:
                            pass

                t = "{} downgraded to connection:close for {} min; dropped {}/{} connections"
                self.log(self.name, t.format(ip, self.args.aclose, nclose, nconn), 1)

                if nloris < nconn / 2:
                    continue

                t = "slowloris (idle-conn): {} banned for {} min"
                self.log(self.name, t.format(ip, self.args.loris, nclose), 1)
                self.bans[ip] = int(time.time() + self.args.loris * 60)

            if self.args.log_conn:
                self.log(self.name, "|%sC-acc1" % ("-" * 2,), c="90")

            try:
                sck, saddr = srv_sck.accept()
                cip, cport = saddr[:2]
                if cip.startswith("::ffff:"):
                    cip = cip[7:]

                addr = (cip, cport)
            except (OSError, socket.error) as ex:
                if self.stopping:
                    break

                self.log(self.name, "accept({}): {}".format(fno, ex), c=6)
                time.sleep(0.02)
                continue

            if self.args.log_conn:
                t = "|{}C-acc2 \033[0;36m{} \033[3{}m{}".format(
                    "-" * 3, ip, port % 8, port
                )
                self.log("%s %s" % addr, t, c="90")

            self.accept(sck, addr)

    def accept(self, sck: socket.socket, addr: tuple[str, int]) -> None:
        """takes an incoming tcp connection and creates a thread to handle it"""
        now = time.time()

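        # if no pool worker has collected a task in 300s, assume the
        # threadpool is dead and fall back to one thread per connection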
        if now - (self.tp_time or now) > 300:
            t = "httpserver threadpool died: tpt {:.2f}, now {:.2f}, nthr {}, ncli {}"
            self.log(self.name, t.format(self.tp_time, now, self.tp_nthr, self.ncli), 1)
            self.tp_time = 0
            self.tp_q = None

        with self.mutex:
            self.ncli += 1
            if not self.t_periodic:
                name = "hsrv-pt"
                if self.nid:
                    name += "-%d" % (self.nid,)

                self.t_periodic = Daemon(self.periodic, name)

            if self.tp_q:
                self.tp_time = self.tp_time or now
                self.tp_ncli = max(self.tp_ncli, self.ncli)
                if self.tp_nthr < self.ncli + 4:
                    self.start_threads(8)

                self.tp_q.put((sck, addr))
                return

        if not self.args.no_htp:
            t = "looks like the httpserver threadpool died; please make an issue on github and tell me the story of how you pulled that off, thanks and dog bless\n"
            self.log(self.name, t, 1)

        Daemon(
            self.thr_client,
            "httpconn-%s-%d" % (addr[0].split(".", 2)[-1][-6:], addr[1]),
            (sck, addr),
        )

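    # pool worker: block on the LIFO task queue and service one connection
    # at a time, temporarily adopting that connection's thread name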
    def thr_poolw(self) -> None:
        assert self.tp_q
        while True:
            task = self.tp_q.get()
            if not task:
                break

            with self.mutex:
                self.tp_time = 0

            try:
                sck, addr = task
                me = threading.current_thread()
                me.name = "httpconn-%s-%d" % (addr[0].split(".", 2)[-1][-6:], addr[1])
                self.thr_client(sck, addr)
                me.name = self.name + "-poolw"
            except Exception as ex:
                if str(ex).startswith("client d/c "):
                    self.log(self.name, "thr_client: " + str(ex), 6)
                else:
                    self.log(self.name, "thr_client: " + min_ex(), 3)

    def shutdown(self) -> None:
        self.stopping = True
        for srv in self.srvs:
            try:
                srv.close()
            except:
                pass

        thrs = []
        clients = list(self.clients)
        for cli in clients:
            t = threading.Thread(target=cli.shutdown)
            thrs.append(t)
            t.start()

        if self.tp_q:
            self.stop_threads(self.tp_nthr)
            for _ in range(10):
                time.sleep(0.05)
                if self.tp_q.empty():
                    break

        for t in thrs:
            t.join()

        self.log(self.name, "ok bye")

    def thr_client(self, sck: socket.socket, addr: tuple[str, int]) -> None:
        """thread managing one tcp client"""
        cli = HttpConn(sck, addr, self)
        with self.mutex:
            self.clients.add(cli)

        # print("{}\n".format(len(self.clients)), end="")
        fno = sck.fileno()
        try:
            if self.args.log_conn:
                self.log("%s %s" % addr, "|%sC-crun" % ("-" * 4,), c="90")

            cli.run()

        except (OSError, socket.error) as ex:
            if ex.errno not in E_SCK:
                self.log(
                    "%s %s" % addr,
                    "run({}): {}".format(fno, ex),
                    c=6,
                )

        finally:
            sck = cli.s
            if self.args.log_conn:
                self.log("%s %s" % addr, "|%sC-cdone" % ("-" * 5,), c="90")

            try:
                fno = sck.fileno()
                shut_socket(cli.log, sck)
            except (OSError, socket.error) as ex:
                if not MACOS:
                    self.log(
                        "%s %s" % addr,
                        "shut({}): {}".format(fno, ex),
                        c="90",
                    )

                if ex.errno not in E_SCK:
                    raise
            finally:
                with self.mutex:
                    self.clients.remove(cli)
                    self.ncli -= 1

                if cli.u2idx:
                    self.put_u2idx(str(addr), cli.u2idx)

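    # cache-busting token for the web assets: derived from the newest mtime
    # under web/, recomputed at most once per second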
    def cachebuster(self) -> str:
        if time.time() - self.cb_ts < 1:
            return self.cb_v

        with self.mutex:
            if time.time() - self.cb_ts < 1:
                return self.cb_v

            v = self.E.t0
            try:
                with os.scandir(os.path.join(self.E.mod, "web")) as dh:
                    for fh in dh:
                        inf = fh.stat()
                        v = max(v, inf.st_mtime)
            except:
                pass

            v = base64.urlsafe_b64encode(spack(b">xxL", int(v)))
            self.cb_v = v.decode("ascii")[-4:]
            self.cb_ts = time.time()
            return self.cb_v

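    # hand out a U2idx (search-index handle) from the free-pool, creating new
    # ones up to one per core; polls for up to ~5s before giving up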
    def get_u2idx(self, ident: str) -> Optional[U2idx]:
        utab = self.u2idx_free
        for _ in range(100):  # 5/0.05 = 5sec
            with self.mutex:
                if utab:
                    if ident in utab:
                        return utab.pop(ident)

                    return utab.pop(list(utab.keys())[0])

                if self.u2idx_n < CORES:
                    self.u2idx_n += 1
                    return U2idx(self)

            time.sleep(0.05)
            # not using conditional waits, on a hunch that
            # average performance will be faster like this
            # since most servers won't be fully saturated

        return None

    def put_u2idx(self, ident: str, u2idx: U2idx) -> None:
        with self.mutex:
            while ident in self.u2idx_free:
                ident += "a"

            self.u2idx_free[ident] = u2idx