+
+
+"""
+
+DEFAULT_ERROR_CONTENT_TYPE = "text/html;charset=utf-8"
+
+class HTTPServer(socketserver.TCPServer):
+
+ allow_reuse_address = 1 # Seems to make sense in testing environment
+
+ def server_bind(self):
+ """Override server_bind to store the server name."""
+ socketserver.TCPServer.server_bind(self)
+ host, port = self.server_address[:2]
+ self.server_name = socket.getfqdn(host)
+ self.server_port = port
+
+
+class BaseHTTPRequestHandler(socketserver.StreamRequestHandler):
+
+ """HTTP request handler base class.
+
+ The following explanation of HTTP serves to guide you through the
+ code as well as to expose any misunderstandings I may have about
+ HTTP (so you don't need to read the code to figure out I'm wrong
+ :-).
+
+ HTTP (HyperText Transfer Protocol) is an extensible protocol on
+ top of a reliable stream transport (e.g. TCP/IP). The protocol
+ recognizes three parts to a request:
+
+ 1. One line identifying the request type and path
+ 2. An optional set of RFC-822-style headers
+ 3. An optional data part
+
+ The headers and data are separated by a blank line.
+
+ The first line of the request has the form
+
+ <command> <path> <version>
+
+ where <command> is a (case-sensitive) keyword such as GET or POST,
+ <path> is a string containing path information for the request,
+ and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
+ <path> is encoded using the URL encoding scheme (using %xx to signify
+ the ASCII character with hex code xx).
+
+ The specification specifies that lines are separated by CRLF but
+ for compatibility with the widest range of clients recommends
+ servers also handle LF. Similarly, whitespace in the request line
+ is treated sensibly (allowing multiple spaces between components
+ and allowing trailing whitespace).
+
+ Similarly, for output, lines ought to be separated by CRLF pairs
+ but most clients grok LF characters just fine.
+
+ If the first line of the request has the form
+
+ <command> <path>
+
+ (i.e. <version> is left out) then this is assumed to be an HTTP
+ 0.9 request; this form has no optional headers and data part and
+ the reply consists of just the data.
+
+ The reply form of the HTTP 1.x protocol again has three parts:
+
+ 1. One line giving the response code
+ 2. An optional set of RFC-822-style headers
+ 3. The data
+
+ Again, the headers and data are separated by a blank line.
+
+ The response code line has the form
+
+ <version> <responsecode> <responsestring>
+
+ where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
+ <responsecode> is a 3-digit response code indicating success or
+ failure of the request, and <responsestring> is an optional
+ human-readable string explaining what the response code means.
+
+ This server parses the request and the headers, and then calls a
+ function specific to the request type (<command>). Specifically,
+ a request SPAM will be handled by a method do_SPAM(). If no
+ such method exists the server sends an error response to the
+ client. If it exists, it is called with no arguments:
+
+ do_SPAM()
+
+ Note that the request name is case sensitive (i.e. SPAM and spam
+ are different requests).
+
+ The various request details are stored in instance variables:
+
+ - client_address is the client IP address in the form (host,
+ port);
+
+ - command, path and version are the broken-down request line;
+
+ - headers is an instance of email.message.Message (or a derived
+ class) containing the header information;
+
+ - rfile is a file object open for reading positioned at the
+ start of the optional input data part;
+
+ - wfile is a file object open for writing.
+
+ IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
+
+ The first thing to be written must be the response line. Then
+ follow 0 or more header lines, then a blank line, and then the
+ actual data (if any). The meaning of the header lines depends on
+ the command executed by the server; in most cases, when data is
+ returned, there should be at least one header line of the form
+
+ Content-type: <type>/<subtype>
+
+ where <type> and <subtype> should be registered MIME types,
+ e.g. "text/html" or "text/plain".
+
+ """
+
+ # The Python system version, truncated to its first component.
+ sys_version = "Python/" + sys.version.split()[0]
+
+ # The server software version. You may want to override this.
+ # The format is multiple whitespace-separated strings,
+ # where each string is of the form name[/version].
+ server_version = "BaseHTTP/" + __version__
+
+ error_message_format = DEFAULT_ERROR_MESSAGE
+ error_content_type = DEFAULT_ERROR_CONTENT_TYPE
+
+ # The default request version. This only affects responses up until
+ # the point where the request line is parsed, so it mainly decides what
+ # the client gets back when sending a malformed request line.
+ # Most web servers default to HTTP 0.9, i.e. don't send a status line.
+ default_request_version = "HTTP/0.9"
+
+ def parse_request(self):
+ """Parse a request (internal).
+
+ The request should be stored in self.raw_requestline; the results
+ are in self.command, self.path, self.request_version and
+ self.headers.
+
+ Return True for success, False for failure; on failure, an
+ error is sent back.
+
+ """
+ self.command = None # set in case of error on the first line
+ self.request_version = version = self.default_request_version
+ self.close_connection = True
+ requestline = str(self.raw_requestline, 'iso-8859-1')
+ requestline = requestline.rstrip('\r\n')
+ self.requestline = requestline
+ words = requestline.split()
+ if len(words) == 3:
+ command, path, version = words
+ try:
+ if version[:5] != 'HTTP/':
+ raise ValueError
+ base_version_number = version.split('/', 1)[1]
+ version_number = base_version_number.split(".")
+ # RFC 2145 section 3.1 says there can be only one "." and
+ # - major and minor numbers MUST be treated as
+ # separate integers;
+ # - HTTP/2.4 is a lower version than HTTP/2.13, which in
+ # turn is lower than HTTP/12.3;
+ # - Leading zeros MUST be ignored by recipients.
+ if len(version_number) != 2:
+ raise ValueError
+ version_number = int(version_number[0]), int(version_number[1])
+ except (ValueError, IndexError):
+ self.send_error(
+ HTTPStatus.BAD_REQUEST,
+ "Bad request version (%r)" % version)
+ return False
+ if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
+ self.close_connection = False
+ if version_number >= (2, 0):
+ self.send_error(
+ HTTPStatus.HTTP_VERSION_NOT_SUPPORTED,
+ "Invalid HTTP version (%s)" % base_version_number)
+ return False
+ elif len(words) == 2:
+ command, path = words
+ self.close_connection = True
+ if command != 'GET':
+ self.send_error(
+ HTTPStatus.BAD_REQUEST,
+ "Bad HTTP/0.9 request type (%r)" % command)
+ return False
+ elif not words:
+ return False
+ else:
+ self.send_error(
+ HTTPStatus.BAD_REQUEST,
+ "Bad request syntax (%r)" % requestline)
+ return False
+ self.command, self.path, self.request_version = command, path, version
+
+ # Examine the headers and look for a Connection directive.
+ try:
+ self.headers = http_client.parse_headers(self.rfile,
+ _class=self.MessageClass)
+ except http_client.LineTooLong as err:
+ self.send_error(
+ HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE,
+ "Line too long",
+ str(err))
+ return False
+ except http_client.HTTPException as err:
+ self.send_error(
+ HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE,
+ "Too many headers",
+ str(err)
+ )
+ return False
+
+ conntype = self.headers.get('Connection', "")
+ if conntype.lower() == 'close':
+ self.close_connection = True
+ elif (conntype.lower() == 'keep-alive' and
+ self.protocol_version >= "HTTP/1.1"):
+ self.close_connection = False
+ # Examine the headers and look for an Expect directive
+ expect = self.headers.get('Expect', "")
+ if (expect.lower() == "100-continue" and
+ self.protocol_version >= "HTTP/1.1" and
+ self.request_version >= "HTTP/1.1"):
+ if not self.handle_expect_100():
+ return False
+ return True
+
+ def handle_expect_100(self):
+ """Decide what to do with an "Expect: 100-continue" header.
+
+ If the client is expecting a 100 Continue response, we must
+ respond with either a 100 Continue or a final response before
+ waiting for the request body. The default is to always respond
+ with a 100 Continue. You can behave differently (for example,
+ reject unauthorized requests) by overriding this method.
+
+ This method should either return True (possibly after sending
+ a 100 Continue response) or send an error response and return
+ False.
+
+ """
+ self.send_response_only(HTTPStatus.CONTINUE)
+ self.end_headers()
+ return True
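+ # An override sketch (hedged; the 401 policy and class name are
+ # illustrative assumptions, not part of this module):
+ #
+ #     class PickyHandler(BaseHTTPRequestHandler):
+ #         def handle_expect_100(self):
+ #             if self.headers.get("Authorization") is None:
+ #                 self.send_error(HTTPStatus.UNAUTHORIZED)
+ #                 return False
+ #             return super().handle_expect_100()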
+
+ def handle_one_request(self):
+ """Handle a single HTTP request.
+
+ You normally don't need to override this method; see the class
+ __doc__ string for information on how to handle specific HTTP
+ commands such as GET and POST.
+
+ """
+ try:
+ self.raw_requestline = self.rfile.readline(65537)
+ if len(self.raw_requestline) > 65536:
+ self.requestline = ''
+ self.request_version = ''
+ self.command = ''
+ self.send_error(HTTPStatus.REQUEST_URI_TOO_LONG)
+ return
+ if not self.raw_requestline:
+ self.close_connection = True
+ return
+ if not self.parse_request():
+ # An error code has been sent, just exit
+ return
+ mname = 'do_' + self.command
+ if not hasattr(self, mname):
+ self.send_error(
+ HTTPStatus.NOT_IMPLEMENTED,
+ "Unsupported method (%r)" % self.command)
+ return
+ method = getattr(self, mname)
+ method()
+ self.wfile.flush() #actually send the response if not already done.
+ except socket.timeout as e:
+ #a read or a write timed out. Discard this connection
+ self.log_error("Request timed out: %r", e)
+ self.close_connection = True
+ return
+
+ def handle(self):
+ """Handle multiple requests if necessary."""
+ self.close_connection = True
+
+ self.handle_one_request()
+ while not self.close_connection:
+ self.handle_one_request()
+
+ def send_error(self, code, message=None, explain=None):
+ """Send and log an error reply.
+
+ Arguments are
+ * code: an HTTP error code
+ 3 digits
+ * message: a simple optional 1 line reason phrase.
+ *( HTAB / SP / VCHAR / %x80-FF )
+ defaults to short entry matching the response code
+ * explain: a detailed message defaults to the long entry
+ matching the response code.
+
+ This sends an error response (so it must be called before any
+ output has been generated), logs the error, and finally sends
+ a piece of HTML explaining the error to the user.
+
+ """
+
+ try:
+ shortmsg, longmsg = self.responses[code]
+ except KeyError:
+ shortmsg, longmsg = '???', '???'
+ if message is None:
+ message = shortmsg
+ if explain is None:
+ explain = longmsg
+ self.log_error("code %d, message %s", code, message)
+ self.send_response(code, message)
+ self.send_header('Connection', 'close')
+
+ # Message body is omitted for cases described in:
+ # - RFC7230: 3.3. 1xx, 204(No Content), 304(Not Modified)
+ # - RFC7231: 6.3.6. 205(Reset Content)
+ body = None
+ if (code >= 200 and
+ code not in (HTTPStatus.NO_CONTENT,
+ HTTPStatus.RESET_CONTENT,
+ HTTPStatus.NOT_MODIFIED)):
+ # HTML encode to prevent Cross Site Scripting attacks
+ # (see bug #1100201)
+ content = (self.error_message_format % {
+ 'code': code,
+ 'message': html.escape(message, quote=False),
+ 'explain': html.escape(explain, quote=False)
+ })
+ body = content.encode('UTF-8', 'replace')
+ self.send_header("Content-Type", self.error_content_type)
+ self.send_header('Content-Length', str(len(body)))
+ self.end_headers()
+
+ if self.command != 'HEAD' and body:
+ self.wfile.write(body)
+
+ def send_response(self, code, message=None):
+ """Add the response header to the headers buffer and log the
+ response code.
+
+ Also send two standard headers with the server software
+ version and the current date.
+
+ """
+ self.log_request(code)
+ self.send_response_only(code, message)
+ self.send_header('Server', self.version_string())
+ self.send_header('Date', self.date_time_string())
+
+ def send_response_only(self, code, message=None):
+ """Send the response header only."""
+ if self.request_version != 'HTTP/0.9':
+ if message is None:
+ if code in self.responses:
+ message = self.responses[code][0]
+ else:
+ message = ''
+ if not hasattr(self, '_headers_buffer'):
+ self._headers_buffer = []
+ self._headers_buffer.append(("%s %d %s\r\n" %
+ (self.protocol_version, code, message)).encode(
+ 'latin-1', 'strict'))
+
+ def send_header(self, keyword, value):
+ """Send a MIME header to the headers buffer."""
+ if self.request_version != 'HTTP/0.9':
+ if not hasattr(self, '_headers_buffer'):
+ self._headers_buffer = []
+ self._headers_buffer.append(
+ ("%s: %s\r\n" % (keyword, value)).encode('latin-1', 'strict'))
+
+ if keyword.lower() == 'connection':
+ if value.lower() == 'close':
+ self.close_connection = True
+ elif value.lower() == 'keep-alive':
+ self.close_connection = False
+
+ def end_headers(self):
+ """Send the blank line ending the MIME headers."""
+ if self.request_version != 'HTTP/0.9':
+ self._headers_buffer.append(b"\r\n")
+ self.flush_headers()
+
+ def flush_headers(self):
+ if hasattr(self, '_headers_buffer'):
+ self.wfile.write(b"".join(self._headers_buffer))
+ self._headers_buffer = []
+
+ def log_request(self, code='-', size='-'):
+ """Log an accepted request.
+
+ This is called by send_response().
+
+ """
+ if isinstance(code, HTTPStatus):
+ code = code.value
+ self.log_message('"%s" %s %s',
+ self.requestline, str(code), str(size))
+
+ def log_error(self, format, *args):
+ """Log an error.
+
+ This is called when a request cannot be fulfilled. By
+ default it passes the message on to log_message().
+
+ Arguments are the same as for log_message().
+
+ XXX This should go to the separate error log.
+
+ """
+
+ self.log_message(format, *args)
+
+ def log_message(self, format, *args):
+ """Log an arbitrary message.
+
+ This is used by all other logging functions. Override
+ it if you have specific logging wishes.
+
+ The first argument, FORMAT, is a format string for the
+ message to be logged. If the format string contains
+ any % escapes requiring parameters, they should be
+ specified as subsequent arguments (it's just like
+ printf!).
+
+ The client ip and current date/time are prefixed to
+ every message.
+
+ """
+
+ sys.stderr.write("%s - - [%s] %s\n" %
+ (self.address_string(),
+ self.log_date_time_string(),
+ format%args))
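+ # Rerouting these logs is done by overriding; a sketch using the stdlib
+ # logging module (the logger name "httpd" is an assumption):
+ #
+ #     import logging
+ #
+ #     class LoggedHandler(BaseHTTPRequestHandler):
+ #         def log_message(self, format, *args):
+ #             logging.getLogger("httpd").info(
+ #                 "%s %s", self.address_string(), format % args)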
+
+ def version_string(self):
+ """Return the server software version string."""
+ return self.server_version + ' ' + self.sys_version
+
+ def date_time_string(self, timestamp=None):
+ """Return the current date and time formatted for a message header."""
+ if timestamp is None:
+ timestamp = time.time()
+ return email.utils.formatdate(timestamp, usegmt=True)
+
+ def log_date_time_string(self):
+ """Return the current time formatted for logging."""
+ now = time.time()
+ year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
+ s = "%02d/%3s/%04d %02d:%02d:%02d" % (
+ day, self.monthname[month], year, hh, mm, ss)
+ return s
+
+ weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+
+ monthname = [None,
+ 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+ 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+ def address_string(self):
+ """Return the client address."""
+
+ return self.client_address[0]
+
+ # Essentially static class variables
+
+ # The version of the HTTP protocol we support.
+ # Set this to HTTP/1.1 to enable automatic keepalive
+ protocol_version = "HTTP/1.0"
+
+ # MessageClass used to parse headers
+ MessageClass = http_client.HTTPMessage
+
+ # hack to maintain backwards compatibility
+ responses = {
+ v: (v.phrase, v.description)
+ for v in HTTPStatus.__members__.values()
+ }
+
+
+class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
+
+ """Simple HTTP request handler with GET and HEAD commands.
+
+ This serves files from the current directory and any of its
+ subdirectories. The MIME type for files is determined by
+ calling the .guess_type() method.
+
+ The GET and HEAD requests are identical except that the HEAD
+ request omits the actual contents of the file.
+
+ """
+
+ server_version = "SimpleHTTP/" + __version__
+
+ def do_GET(self):
+ """Serve a GET request."""
+ f = self.send_head()
+ if f:
+ try:
+ self.copyfile(f, self.wfile)
+ finally:
+ f.close()
+
+ def do_HEAD(self):
+ """Serve a HEAD request."""
+ f = self.send_head()
+ if f:
+ f.close()
+
+ def send_head(self):
+ """Common code for GET and HEAD commands.
+
+ This sends the response code and MIME headers.
+
+ Return value is either a file object (which has to be copied
+ to the outputfile by the caller unless the command was HEAD,
+ and must be closed by the caller under all circumstances), or
+ None, in which case the caller has nothing further to do.
+
+ """
+ path = self.translate_path(self.path)
+ f = None
+ if os.path.isdir(path):
+ parts = urllib.parse.urlsplit(self.path)
+ if not parts.path.endswith('/'):
+ # redirect browser - doing basically what apache does
+ self.send_response(HTTPStatus.MOVED_PERMANENTLY)
+ new_parts = (parts[0], parts[1], parts[2] + '/',
+ parts[3], parts[4])
+ new_url = urllib.parse.urlunsplit(new_parts)
+ self.send_header("Location", new_url)
+ self.end_headers()
+ return None
+ for index in "index.html", "index.htm":
+ index = os.path.join(path, index)
+ if os.path.exists(index):
+ path = index
+ break
+ else:
+ return self.list_directory(path)
+ ctype = self.guess_type(path)
+ try:
+ f = open(path, 'rb')
+ except OSError:
+ self.send_error(HTTPStatus.NOT_FOUND, "File not found")
+ return None
+ try:
+ self.send_response(HTTPStatus.OK)
+ self.send_header("Content-type", ctype)
+ fs = os.fstat(f.fileno())
+ self.send_header("Content-Length", str(fs[6]))
+ self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
+ self.end_headers()
+ return f
+ except:
+ f.close()
+ raise
+
+ def list_directory(self, path):
+ """Helper to produce a directory listing (absent index.html).
+
+ Return value is either a file object, or None (indicating an
+ error). In either case, the headers are sent, making the
+ interface the same as for send_head().
+
+ """
+ try:
+ list = os.listdir(path)
+ except OSError:
+ self.send_error(
+ HTTPStatus.NOT_FOUND,
+ "No permission to list directory")
+ return None
+ list.sort(key=lambda a: a.lower())
+ r = []
+ try:
+ displaypath = urllib.parse.unquote(self.path,
+ errors='surrogatepass')
+ except UnicodeDecodeError:
+ displaypath = urllib.parse.unquote(path)
+ displaypath = html.escape(displaypath, quote=False)
+ enc = sys.getfilesystemencoding()
+ title = 'Directory listing for %s' % displaypath
+ r.append('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" '
+ '"http://www.w3.org/TR/html4/strict.dtd">')
+ r.append('<html>\n<head>')
+ r.append('<meta http-equiv="Content-Type" '
+ 'content="text/html; charset=%s">' % enc)
+ r.append('<title>%s</title>\n</head>' % title)
+ r.append('<body>\n<h1>%s</h1>' % title)
+ r.append('<hr>\n<ul>')
+ for name in list:
+ fullname = os.path.join(path, name)
+ displayname = linkname = name
+ # Append / for directories or @ for symbolic links
+ if os.path.isdir(fullname):
+ displayname = name + "/"
+ linkname = name + "/"
+ if os.path.islink(fullname):
+ displayname = name + "@"
+ # Note: a link to a directory displays with @ and links with /
+ r.append('<li><a href="%s">%s</a></li>'
+ % (urllib.parse.quote(linkname, errors='surrogatepass'),
+ html.escape(displayname, quote=False)))
+ r.append('</ul>\n<hr>\n</body>\n</html>\n')
+ encoded = '\n'.join(r).encode(enc, 'surrogateescape')
+ f = io.BytesIO()
+ f.write(encoded)
+ f.seek(0)
+ self.send_response(HTTPStatus.OK)
+ self.send_header("Content-type", "text/html; charset=%s" % enc)
+ self.send_header("Content-Length", str(len(encoded)))
+ self.end_headers()
+ return f
+
+ def translate_path(self, path):
+ """Translate a /-separated PATH to the local filename syntax.
+
+ Components that mean special things to the local file system
+ (e.g. drive or directory names) are ignored. (XXX They should
+ probably be diagnosed.)
+
+ """
+ # abandon query parameters
+ path = path.split('?',1)[0]
+ path = path.split('#',1)[0]
+ # Don't forget explicit trailing slash when normalizing. Issue17324
+ trailing_slash = path.rstrip().endswith('/')
+ try:
+ path = urllib.parse.unquote(path, errors='surrogatepass')
+ except UnicodeDecodeError:
+ path = urllib.parse.unquote(path)
+ path = posixpath.normpath(path)
+ words = path.split('/')
+ words = filter(None, words)
+ path = os.getcwd()
+ for word in words:
+ if os.path.dirname(word) or word in (os.curdir, os.pardir):
+ # Ignore components that are not a simple file/directory name
+ continue
+ path = os.path.join(path, word)
+ if trailing_slash:
+ path += '/'
+ return path
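+ # Worked example (assuming the process cwd is /srv/www): a request path of
+ # "/docs/../images/logo.png?v=1" has its query dropped, is unquoted,
+ # collapsed by posixpath.normpath to "/images/logo.png", and joined onto
+ # os.getcwd(), yielding "/srv/www/images/logo.png".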
+
+ def copyfile(self, source, outputfile):
+ """Copy all data between two file objects.
+
+ The SOURCE argument is a file object open for reading
+ (or anything with a read() method) and the DESTINATION
+ argument is a file object open for writing (or
+ anything with a write() method).
+
+ The only reason for overriding this would be to change
+ the block size or perhaps to replace newlines by CRLF
+ -- note however that the default server uses this
+ to copy binary data as well.
+
+ """
+ shutil.copyfileobj(source, outputfile)
+
+ def guess_type(self, path):
+ """Guess the type of a file.
+
+ Argument is a PATH (a filename).
+
+ Return value is a string of the form type/subtype,
+ usable for a MIME Content-type header.
+
+ The default implementation looks the file's extension
+ up in the table self.extensions_map, using application/octet-stream
+ as a default; however it would be permissible (if
+ slow) to look inside the data to make a better guess.
+
+ """
+
+ base, ext = posixpath.splitext(path)
+ if ext in self.extensions_map:
+ return self.extensions_map[ext]
+ ext = ext.lower()
+ if ext in self.extensions_map:
+ return self.extensions_map[ext]
+ else:
+ return self.extensions_map['']
+
+ if not mimetypes.inited:
+ mimetypes.init() # try to read system mime.types
+ extensions_map = mimetypes.types_map.copy()
+ extensions_map.update({
+ '': 'application/octet-stream', # Default
+ '.py': 'text/plain',
+ '.c': 'text/plain',
+ '.h': 'text/plain',
+ })
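+ # A subclass can extend the table without mutating the shared copy; a
+ # sketch (the ".md" mapping is an illustrative assumption):
+ #
+ #     class MarkdownHandler(SimpleHTTPRequestHandler):
+ #         extensions_map = {**SimpleHTTPRequestHandler.extensions_map,
+ #                           ".md": "text/markdown"}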
+
+
+# Utilities for CGIHTTPRequestHandler
+
+def _url_collapse_path(path):
+ """
+ Given a URL path, remove extra '/'s and '.' path elements and collapse
+ any '..' references, returning a collapsed path.
+
+ Implements something akin to RFC-2396 5.2 step 6 to parse relative paths.
+ The utility of this function is limited to the is_cgi method and helps
+ prevent some security attacks.
+
+ Returns: The reconstituted URL, which will always start with a '/'.
+
+ Raises: IndexError if too many '..' occur within the path.
+
+ """
+ # Query component should not be involved.
+ path, _, query = path.partition('?')
+ path = urllib.parse.unquote(path)
+
+ # Similar to os.path.split(os.path.normpath(path)) but specific to URL
+ # path semantics rather than local operating system semantics.
+ path_parts = path.split('/')
+ head_parts = []
+ for part in path_parts[:-1]:
+ if part == '..':
+ head_parts.pop() # IndexError if more '..' than prior parts
+ elif part and part != '.':
+ head_parts.append(part)
+ if path_parts:
+ tail_part = path_parts.pop()
+ if tail_part:
+ if tail_part == '..':
+ head_parts.pop()
+ tail_part = ''
+ elif tail_part == '.':
+ tail_part = ''
+ else:
+ tail_part = ''
+
+ if query:
+ tail_part = '?'.join((tail_part, query))
+
+ splitpath = ('/' + '/'.join(head_parts), tail_part)
+ collapsed_path = "/".join(splitpath)
+
+ return collapsed_path
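+# Worked examples of the collapsing behavior above (derived by tracing the
+# code, not from separate documentation):
+#
+#     _url_collapse_path('//cgi-bin/./x/../y')  -> '/cgi-bin/y'
+#     _url_collapse_path('/a/b/..')             -> '/a/'
+#     _url_collapse_path('/cgi-bin/x?q=1')      -> '/cgi-bin/x?q=1'
+#     _url_collapse_path('/a/../../b')          raises IndexError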
+
+
+
+nobody = None
+
+def nobody_uid():
+ """Internal routine to get nobody's uid"""
+ global nobody
+ if nobody:
+ return nobody
+ try:
+ import pwd
+ except ImportError:
+ return -1
+ try:
+ nobody = pwd.getpwnam('nobody')[2]
+ except KeyError:
+ nobody = 1 + max(x[2] for x in pwd.getpwall())
+ return nobody
+
+
+def executable(path):
+ """Test for executable file."""
+ return os.access(path, os.X_OK)
+
+
+class CGIHTTPRequestHandler(SimpleHTTPRequestHandler):
+
+ """Complete HTTP server with GET, HEAD and POST commands.
+
+ GET and HEAD also support running CGI scripts.
+
+ The POST command is *only* implemented for CGI scripts.
+
+ """
+
+ # Determine platform specifics
+ have_fork = hasattr(os, 'fork')
+
+ # Make rfile unbuffered -- we need to read one line and then pass
+ # the rest to a subprocess, so we can't use buffered input.
+ rbufsize = 0
+
+ def do_POST(self):
+ """Serve a POST request.
+
+ This is only implemented for CGI scripts.
+
+ """
+
+ if self.is_cgi():
+ self.run_cgi()
+ else:
+ self.send_error(
+ HTTPStatus.NOT_IMPLEMENTED,
+ "Can only POST to CGI scripts")
+
+ def send_head(self):
+ """Version of send_head that support CGI scripts"""
+ if self.is_cgi():
+ return self.run_cgi()
+ else:
+ return SimpleHTTPRequestHandler.send_head(self)
+
+ def is_cgi(self):
+ """Test whether self.path corresponds to a CGI script.
+
+ Returns True and updates the cgi_info attribute to the tuple
+ (dir, rest) if self.path requires running a CGI script.
+ Returns False otherwise.
+
+ If any exception is raised, the caller should assume that
+ self.path was rejected as invalid and act accordingly.
+
+ The default implementation tests whether the normalized url
+ path begins with one of the strings in self.cgi_directories
+ (and the next character is a '/' or the end of the string).
+
+ """
+ collapsed_path = _url_collapse_path(self.path)
+ dir_sep = collapsed_path.find('/', 1)
+ head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:]
+ if head in self.cgi_directories:
+ self.cgi_info = head, tail
+ return True
+ return False
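+ # For example, with self.path == "/cgi-bin/hello.py?name=x" this sets
+ # cgi_info to ('/cgi-bin', 'hello.py?name=x') and returns True; the query
+ # string stays attached to the tail for run_cgi() to split off later.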
+
+
+ cgi_directories = ['/cgi-bin', '/htbin']
+
+ def is_executable(self, path):
+ """Test whether argument path is an executable file."""
+ return executable(path)
+
+ def is_python(self, path):
+ """Test whether argument path is a Python script."""
+ head, tail = os.path.splitext(path)
+ return tail.lower() in (".py", ".pyw")
+
+ def run_cgi(self):
+ """Execute a CGI script."""
+ dir, rest = self.cgi_info
+ path = dir + '/' + rest
+ i = path.find('/', len(dir)+1)
+ while i >= 0:
+ nextdir = path[:i]
+ nextrest = path[i+1:]
+
+ scriptdir = self.translate_path(nextdir)
+ if os.path.isdir(scriptdir):
+ dir, rest = nextdir, nextrest
+ i = path.find('/', len(dir)+1)
+ else:
+ break
+
+ # find an explicit query string, if present.
+ rest, _, query = rest.partition('?')
+
+ # dissect the part after the directory name into a script name &
+ # a possible additional path, to be stored in PATH_INFO.
+ i = rest.find('/')
+ if i >= 0:
+ script, rest = rest[:i], rest[i:]
+ else:
+ script, rest = rest, ''
+
+ scriptname = dir + '/' + script
+ scriptfile = self.translate_path(scriptname)
+ if not os.path.exists(scriptfile):
+ self.send_error(
+ HTTPStatus.NOT_FOUND,
+ "No such CGI script (%r)" % scriptname)
+ return
+ if not os.path.isfile(scriptfile):
+ self.send_error(
+ HTTPStatus.FORBIDDEN,
+ "CGI script is not a plain file (%r)" % scriptname)
+ return
+ ispy = self.is_python(scriptname)
+ if self.have_fork or not ispy:
+ if not self.is_executable(scriptfile):
+ self.send_error(
+ HTTPStatus.FORBIDDEN,
+ "CGI script is not executable (%r)" % scriptname)
+ return
+
+ # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
+ # XXX Much of the following could be prepared ahead of time!
+ env = copy.deepcopy(os.environ)
+ env['SERVER_SOFTWARE'] = self.version_string()
+ env['SERVER_NAME'] = self.server.server_name
+ env['GATEWAY_INTERFACE'] = 'CGI/1.1'
+ env['SERVER_PROTOCOL'] = self.protocol_version
+ env['SERVER_PORT'] = str(self.server.server_port)
+ env['REQUEST_METHOD'] = self.command
+ uqrest = urllib.parse.unquote(rest)
+ env['PATH_INFO'] = uqrest
+ env['PATH_TRANSLATED'] = self.translate_path(uqrest)
+ env['SCRIPT_NAME'] = scriptname
+ if query:
+ env['QUERY_STRING'] = query
+ env['REMOTE_ADDR'] = self.client_address[0]
+ authorization = self.headers.get("authorization")
+ if authorization:
+ authorization = authorization.split()
+ if len(authorization) == 2:
+ import base64, binascii
+ env['AUTH_TYPE'] = authorization[0]
+ if authorization[0].lower() == "basic":
+ try:
+ authorization = authorization[1].encode('ascii')
+ authorization = base64.decodebytes(authorization).\
+ decode('ascii')
+ except (binascii.Error, UnicodeError):
+ pass
+ else:
+ authorization = authorization.split(':')
+ if len(authorization) == 2:
+ env['REMOTE_USER'] = authorization[0]
+ # XXX REMOTE_IDENT
+ if self.headers.get('content-type') is None:
+ env['CONTENT_TYPE'] = self.headers.get_content_type()
+ else:
+ env['CONTENT_TYPE'] = self.headers['content-type']
+ length = self.headers.get('content-length')
+ if length:
+ env['CONTENT_LENGTH'] = length
+ referer = self.headers.get('referer')
+ if referer:
+ env['HTTP_REFERER'] = referer
+ accept = []
+ for line in self.headers.getallmatchingheaders('accept'):
+ if line[:1] in "\t\n\r ":
+ accept.append(line.strip())
+ else:
+ accept = accept + line[7:].split(',')
+ env['HTTP_ACCEPT'] = ','.join(accept)
+ ua = self.headers.get('user-agent')
+ if ua:
+ env['HTTP_USER_AGENT'] = ua
+ co = filter(None, self.headers.get_all('cookie', []))
+ cookie_str = ', '.join(co)
+ if cookie_str:
+ env['HTTP_COOKIE'] = cookie_str
+ # XXX Other HTTP_* headers
+ # Since we're setting the env in the parent, provide empty
+ # values to override previously set values
+ for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
+ 'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
+ env.setdefault(k, "")
+
+ self.send_response(HTTPStatus.OK, "Script output follows")
+ self.flush_headers()
+
+ decoded_query = query.replace('+', ' ')
+
+ if self.have_fork:
+ # Unix -- fork as we should
+ args = [script]
+ if '=' not in decoded_query:
+ args.append(decoded_query)
+ nobody = nobody_uid()
+ self.wfile.flush() # Always flush before forking
+ pid = os.fork()
+ if pid != 0:
+ # Parent
+ pid, sts = os.waitpid(pid, 0)
+ # throw away additional data [see bug #427345]
+ while select.select([self.rfile], [], [], 0)[0]:
+ if not self.rfile.read(1):
+ break
+ if sts:
+ self.log_error("CGI script exit status %#x", sts)
+ return
+ # Child
+ try:
+ try:
+ os.setuid(nobody)
+ except OSError:
+ pass
+ os.dup2(self.rfile.fileno(), 0)
+ os.dup2(self.wfile.fileno(), 1)
+ os.execve(scriptfile, args, env)
+ except:
+ self.server.handle_error(self.request, self.client_address)
+ os._exit(127)
+
+ else:
+ # Non-Unix -- use subprocess
+ cmdline = [scriptfile]
+ if self.is_python(scriptfile):
+ interp = sys.executable
+ if interp.lower().endswith("w.exe"):
+ # On Windows, use python.exe, not pythonw.exe
+ interp = interp[:-5] + interp[-4:]
+ cmdline = [interp, '-u'] + cmdline
+ if '=' not in query:
+ cmdline.append(query)
+ self.log_message("command: %s", subprocess.list2cmdline(cmdline))
+ try:
+ nbytes = int(length)
+ except (TypeError, ValueError):
+ nbytes = 0
+ p = subprocess.Popen(cmdline,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env = env
+ )
+ if self.command.lower() == "post" and nbytes > 0:
+ data = self.rfile.read(nbytes)
+ else:
+ data = None
+ # throw away additional data [see bug #427345]
+ while select.select([self.rfile._sock], [], [], 0)[0]:
+ if not self.rfile._sock.recv(1):
+ break
+ stdout, stderr = p.communicate(data)
+ self.wfile.write(stdout)
+ if stderr:
+ self.log_error('%s', stderr)
+ p.stderr.close()
+ p.stdout.close()
+ status = p.returncode
+ if status:
+ self.log_error("CGI script exit status %#x", status)
+ else:
+ self.log_message("CGI script exited OK")
+
+
+def test(HandlerClass=BaseHTTPRequestHandler,
+ ServerClass=HTTPServer, protocol="HTTP/1.0", port=8000, bind=""):
+ """Test the HTTP request handler class.
+
+ This runs an HTTP server on port 8000 (or the port argument).
+
+ """
+ server_address = (bind, port)
+
+ HandlerClass.protocol_version = protocol
+ with ServerClass(server_address, HandlerClass) as httpd:
+ sa = httpd.socket.getsockname()
+ serve_message = "Serving HTTP on {host} port {port} (http://{host}:{port}/) ..."
+ print(serve_message.format(host=sa[0], port=sa[1]))
+ try:
+ httpd.serve_forever()
+ except KeyboardInterrupt:
+ print("\nKeyboard interrupt received, exiting.")
+ sys.exit(0)
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--cgi', action='store_true',
+ help='Run as CGI Server')
+ parser.add_argument('--bind', '-b', default='', metavar='ADDRESS',
+ help='Specify alternate bind address '
+ '[default: all interfaces]')
+ parser.add_argument('port', action='store',
+ default=8000, type=int,
+ nargs='?',
+ help='Specify alternate port [default: 8000]')
+ args = parser.parse_args()
+ if args.cgi:
+ handler_class = CGIHTTPRequestHandler
+ else:
+ handler_class = SimpleHTTPRequestHandler
+ test(HandlerClass=handler_class, port=args.port, bind=args.bind)
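+# Command-line usage, sketched (the file name is an assumption for this
+# vendored copy; the stdlib equivalent is `python -m http.server`):
+#
+#     python server.py 8080 --bind 127.0.0.1
+#     python server.py --cgi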
diff --git a/.venv/Lib/site-packages/eventlet/green/httplib.py b/.venv/Lib/site-packages/eventlet/green/httplib.py
new file mode 100644
index 0000000..f67dbfe
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/green/httplib.py
@@ -0,0 +1,18 @@
+from eventlet import patcher
+from eventlet.green import socket
+
+to_patch = [('socket', socket)]
+
+try:
+ from eventlet.green import ssl
+ to_patch.append(('ssl', ssl))
+except ImportError:
+ pass
+
+from eventlet.green.http import client
+for name in dir(client):
+ if name not in patcher.__exclude:
+ globals()[name] = getattr(client, name)
+
+if __name__ == '__main__':
+ test()
diff --git a/.venv/Lib/site-packages/eventlet/green/os.py b/.venv/Lib/site-packages/eventlet/green/os.py
new file mode 100644
index 0000000..8052be9
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/green/os.py
@@ -0,0 +1,119 @@
+os_orig = __import__("os")
+import errno
+socket = __import__("socket")
+
+from eventlet import greenio
+from eventlet.support import get_errno
+from eventlet import greenthread
+from eventlet import hubs
+from eventlet.patcher import slurp_properties
+
+__all__ = os_orig.__all__
+__patched__ = ['fdopen', 'read', 'write', 'wait', 'waitpid', 'open']
+
+slurp_properties(
+ os_orig,
+ globals(),
+ ignore=__patched__,
+ srckeys=dir(os_orig))
+
+
+def fdopen(fd, *args, **kw):
+ """fdopen(fd [, mode='r' [, bufsize]]) -> file_object
+
+ Return an open file object connected to a file descriptor."""
+ if not isinstance(fd, int):
+ raise TypeError('fd should be int, not %r' % fd)
+ try:
+ return greenio.GreenPipe(fd, *args, **kw)
+ except OSError as e:
+ raise OSError(*e.args)
+
+
+__original_read__ = os_orig.read
+
+
+def read(fd, n):
+ """read(fd, buffersize) -> string
+
+ Read a file descriptor."""
+ while True:
+ try:
+ return __original_read__(fd, n)
+ except OSError as e:
+ if get_errno(e) == errno.EPIPE:
+ return ''
+ if get_errno(e) != errno.EAGAIN:
+ raise
+ try:
+ hubs.trampoline(fd, read=True)
+ except hubs.IOClosed:
+ return ''
+
+
+__original_write__ = os_orig.write
+
+
+def write(fd, st):
+ """write(fd, string) -> byteswritten
+
+ Write a string to a file descriptor.
+ """
+ while True:
+ try:
+ return __original_write__(fd, st)
+ except OSError as e:
+ if get_errno(e) not in [errno.EAGAIN, errno.EPIPE]:
+ raise
+ hubs.trampoline(fd, write=True)
+
+
+def wait():
+ """wait() -> (pid, status)
+
+ Wait for completion of a child process."""
+ return waitpid(0, 0)
+
+
+__original_waitpid__ = os_orig.waitpid
+
+
+def waitpid(pid, options):
+ """waitpid(...)
+ waitpid(pid, options) -> (pid, status)
+
+ Wait for completion of a given child process."""
+ if options & os_orig.WNOHANG != 0:
+ return __original_waitpid__(pid, options)
+ else:
+ new_options = options | os_orig.WNOHANG
+ while True:
+ rpid, status = __original_waitpid__(pid, new_options)
+ if rpid and status >= 0:
+ return rpid, status
+ greenthread.sleep(0.01)
+
+
+__original_open__ = os_orig.open
+
+
+def open(file, flags, mode=0o777, dir_fd=None):
+ """ Wrap os.open
+ This behaves identically, but collaborates with
+ the hub's notify_opened protocol.
+ """
+ # pathlib workaround #534 pathlib._NormalAccessor wraps `open` in
+ # `staticmethod` for py < 3.7 but not 3.7. That means we get here with
+ # `file` being a pathlib._NormalAccessor object, and the other arguments
+ # shifted. Fortunately pathlib doesn't use the `dir_fd` argument, so we
+ # have space in the parameter list. We use some heuristics to detect this
+ # and adjust the parameters (without importing pathlib)
+ if type(file).__name__ == '_NormalAccessor':
+ file, flags, mode, dir_fd = flags, mode, dir_fd, None
+
+ if dir_fd is not None:
+ fd = __original_open__(file, flags, mode, dir_fd=dir_fd)
+ else:
+ fd = __original_open__(file, flags, mode)
+ hubs.notify_opened(fd)
+ return fd
diff --git a/.venv/Lib/site-packages/eventlet/green/profile.py b/.venv/Lib/site-packages/eventlet/green/profile.py
new file mode 100644
index 0000000..a03b507
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/green/profile.py
@@ -0,0 +1,257 @@
+# Copyright (c) 2010, CCP Games
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# * Neither the name of CCP Games nor the
+# names of its contributors may be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY CCP GAMES ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL CCP GAMES BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""This module is API-equivalent to the standard library :mod:`profile` module
+but it is greenthread-aware as well as thread-aware. Use this module
+to profile Eventlet-based applications in preference to either :mod:`profile` or :mod:`cProfile`.
+FIXME: No testcases for this module.
+"""
+
+profile_orig = __import__('profile')
+__all__ = profile_orig.__all__
+
+from eventlet.patcher import slurp_properties
+slurp_properties(profile_orig, globals(), srckeys=dir(profile_orig))
+
+import sys
+import functools
+
+from eventlet import greenthread
+from eventlet import patcher
+import _thread
+
+thread = patcher.original(_thread.__name__) # non-monkeypatched module needed
+
+
+# This class provides the start() and stop() functions
+class Profile(profile_orig.Profile):
+ base = profile_orig.Profile
+
+ def __init__(self, timer=None, bias=None):
+ self.current_tasklet = greenthread.getcurrent()
+ self.thread_id = thread.get_ident()
+ self.base.__init__(self, timer, bias)
+ self.sleeping = {}
+
+ def __call__(self, *args):
+ """make callable, allowing an instance to be the profiler"""
+ self.dispatcher(*args)
+
+ def _setup(self):
+ self._has_setup = True
+ self.cur = None
+ self.timings = {}
+ self.current_tasklet = greenthread.getcurrent()
+ self.thread_id = thread.get_ident()
+ self.simulate_call("profiler")
+
+ def start(self, name="start"):
+ if getattr(self, "running", False):
+ return
+ self._setup()
+ self.simulate_call("start")
+ self.running = True
+ sys.setprofile(self.dispatcher)
+
+ def stop(self):
+ sys.setprofile(None)
+ self.running = False
+ self.TallyTimings()
+
+ # special cases for the original run commands, making sure to
+ # clear the timer context.
+ def runctx(self, cmd, globals, locals):
+ if not getattr(self, "_has_setup", False):
+ self._setup()
+ try:
+ return profile_orig.Profile.runctx(self, cmd, globals, locals)
+ finally:
+ self.TallyTimings()
+
+ def runcall(self, func, *args, **kw):
+ if not getattr(self, "_has_setup", False):
+ self._setup()
+ try:
+ return profile_orig.Profile.runcall(self, func, *args, **kw)
+ finally:
+ self.TallyTimings()
+
+ def trace_dispatch_return_extend_back(self, frame, t):
+ """A hack function to override error checking in parent class. It
+ allows invalid returns (where frames weren't previously entered into
+ the profiler) which can happen for all the tasklets that suddenly start
+ to get monitored. This means that the time will eventually be attributed
+ to a call high in the chain, when there is a tasklet switch
+ """
+ if isinstance(self.cur[-2], Profile.fake_frame):
+ return False
+ self.trace_dispatch_call(frame, 0)
+ return self.trace_dispatch_return(frame, t)
+
+ def trace_dispatch_c_return_extend_back(self, frame, t):
+ # same for c return
+ if isinstance(self.cur[-2], Profile.fake_frame):
+ return False # ignore bogus returns
+ self.trace_dispatch_c_call(frame, 0)
+ return self.trace_dispatch_return(frame, t)
+
+ def SwitchTasklet(self, t0, t1, t):
+ # tally the time spent in the old tasklet
+ pt, it, et, fn, frame, rcur = self.cur
+ cur = (pt, it + t, et, fn, frame, rcur)
+
+ # we are switching to a new tasklet, store the old
+ self.sleeping[t0] = cur, self.timings
+ self.current_tasklet = t1
+
+ # find the new one
+ try:
+ self.cur, self.timings = self.sleeping.pop(t1)
+ except KeyError:
+ self.cur, self.timings = None, {}
+ self.simulate_call("profiler")
+ self.simulate_call("new_tasklet")
+
+ def TallyTimings(self):
+ oldtimings = self.sleeping
+ self.sleeping = {}
+
+ # first, unwind the main "cur"
+ self.cur = self.Unwind(self.cur, self.timings)
+
+ # we must keep the timings dicts separate for each tasklet, since they
+ # contain the 'ns' item, the recursion count of each function in that
+ # tasklet. This is used in Unwind.
+ for tasklet, (cur, timings) in oldtimings.items():
+ self.Unwind(cur, timings)
+
+ for k, v in timings.items():
+ if k not in self.timings:
+ self.timings[k] = v
+ else:
+ # accumulate all to the self.timings
+ cc, ns, tt, ct, callers = self.timings[k]
+ # ns should be 0 after unwinding
+ cc += v[0]
+ tt += v[2]
+ ct += v[3]
+ for k1, v1 in v[4].items():
+ callers[k1] = callers.get(k1, 0) + v1
+ self.timings[k] = cc, ns, tt, ct, callers
+
+ def Unwind(self, cur, timings):
+ "A function to unwind a 'cur' frame and tally the results"
+ "see profile.trace_dispatch_return() for details"
+ # also see simulate_cmd_complete()
+ while cur[-1]:
+ rpt, rit, ret, rfn, frame, rcur = cur
+ frame_total = rit + ret
+
+ if rfn in timings:
+ cc, ns, tt, ct, callers = timings[rfn]
+ else:
+ cc, ns, tt, ct, callers = 0, 0, 0, 0, {}
+
+ if not ns:
+ ct = ct + frame_total
+ cc = cc + 1
+
+ if rcur:
+ ppt, pit, pet, pfn, pframe, pcur = rcur
+ else:
+ pfn = None
+
+ if pfn in callers:
+ callers[pfn] = callers[pfn] + 1 # hack: gather more
+ elif pfn:
+ callers[pfn] = 1
+
+ timings[rfn] = cc, ns - 1, tt + rit, ct, callers
+
+ ppt, pit, pet, pfn, pframe, pcur = rcur
+ rcur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur
+ cur = rcur
+ return cur
+
+
+def ContextWrap(f):
+ @functools.wraps(f)
+ def ContextWrapper(self, arg, t):
+ current = greenthread.getcurrent()
+ if current != self.current_tasklet:
+ self.SwitchTasklet(self.current_tasklet, current, t)
+ t = 0.0 # the time was billed to the previous tasklet
+ return f(self, arg, t)
+ return ContextWrapper
+
+
+# Add "return safety" to the dispatchers
+Profile.dispatch = dict(profile_orig.Profile.dispatch, **{
+ 'return': Profile.trace_dispatch_return_extend_back,
+ 'c_return': Profile.trace_dispatch_c_return_extend_back,
+})
+# Add automatic tasklet detection to the callbacks.
+Profile.dispatch = {k: ContextWrap(v) for k, v in Profile.dispatch.items()}
+
+
+# run statements shamelessly stolen from profile.py
+def run(statement, filename=None, sort=-1):
+ """Run statement under profiler optionally saving results in filename
+
+ This function takes a single argument that can be passed to the
+ "exec" statement, and an optional file name. In all cases this
+ routine attempts to "exec" its first argument and gather profiling
+ statistics from the execution. If no file name is present, then this
+ function automatically prints a simple profiling report, sorted by the
+ standard name string (file/line/function-name) that is presented in
+ each line.
+ """
+ prof = Profile()
+ try:
+ prof = prof.run(statement)
+ except SystemExit:
+ pass
+ if filename is not None:
+ prof.dump_stats(filename)
+ else:
+ return prof.print_stats(sort)
+
+
+def runctx(statement, globals, locals, filename=None):
+ """Run statement under profiler, supplying your own globals and locals,
+ optionally saving results in filename.
+
+ statement and filename have the same semantics as profile.run
+ """
+ prof = Profile()
+ try:
+ prof = prof.runctx(statement, globals, locals)
+ except SystemExit:
+ pass
+
+ if filename is not None:
+ prof.dump_stats(filename)
+ else:
+ return prof.print_stats()
diff --git a/.venv/Lib/site-packages/eventlet/green/select.py b/.venv/Lib/site-packages/eventlet/green/select.py
new file mode 100644
index 0000000..a87d10d
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/green/select.py
@@ -0,0 +1,86 @@
+import eventlet
+from eventlet.hubs import get_hub
+__select = eventlet.patcher.original('select')
+error = __select.error
+
+
+__patched__ = ['select']
+__deleted__ = ['devpoll', 'poll', 'epoll', 'kqueue', 'kevent']
+
+
+def get_fileno(obj):
+ # The purpose of this function is to exactly replicate
+ # the behavior of the select module when confronted with
+ # abnormal filenos; the details are extensively tested in
+ # the stdlib test/test_select.py.
+ try:
+ f = obj.fileno
+ except AttributeError:
+ if not isinstance(obj, int):
+ raise TypeError("Expected int or long, got %s" % type(obj))
+ return obj
+ else:
+ rv = f()
+ if not isinstance(rv, int):
+ raise TypeError("Expected int or long, got %s" % type(rv))
+ return rv
+
+
+def select(read_list, write_list, error_list, timeout=None):
+ # error checking like this is required by the stdlib unit tests
+ if timeout is not None:
+ try:
+ timeout = float(timeout)
+ except ValueError:
+ raise TypeError("Expected number for timeout")
+ hub = get_hub()
+ timers = []
+ current = eventlet.getcurrent()
+ if hub.greenlet is current:
+ raise RuntimeError('do not call blocking functions from the mainloop')
+ ds = {}
+ for r in read_list:
+ ds[get_fileno(r)] = {'read': r}
+ for w in write_list:
+ ds.setdefault(get_fileno(w), {})['write'] = w
+ for e in error_list:
+ ds.setdefault(get_fileno(e), {})['error'] = e
+
+ listeners = []
+
+ def on_read(d):
+ original = ds[get_fileno(d)]['read']
+ current.switch(([original], [], []))
+
+ def on_write(d):
+ original = ds[get_fileno(d)]['write']
+ current.switch(([], [original], []))
+
+ def on_timeout2():
+ current.switch(([], [], []))
+
+ def on_timeout():
+ # ensure that BaseHub.run() has a chance to call self.wait()
+ # at least once before timed out. otherwise the following code
+ # can time out erroneously.
+ #
+ # s1, s2 = socket.socketpair()
+ # print(select.select([], [s1], [], 0))
+ timers.append(hub.schedule_call_global(0, on_timeout2))
+
+ if timeout is not None:
+ timers.append(hub.schedule_call_global(timeout, on_timeout))
+ try:
+ for k, v in ds.items():
+ if v.get('read'):
+ listeners.append(hub.add(hub.READ, k, on_read, current.throw, lambda: None))
+ if v.get('write'):
+ listeners.append(hub.add(hub.WRITE, k, on_write, current.throw, lambda: None))
+ try:
+ return hub.switch()
+ finally:
+ for l in listeners:
+ hub.remove(l)
+ finally:
+ for t in timers:
+ t.cancel()
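+# Illustrative use (a sketch; assumes sockets created via eventlet.green.socket
+# so that waiting cooperatively yields to other greenthreads):
+#
+#     from eventlet.green import select, socket
+#     server = socket.socket()
+#     server.bind(('127.0.0.1', 0))
+#     server.listen(1)
+#     readable, _, _ = select.select([server], [], [], 0.5)
+#     # readable == [server] once a client connects; [] after the timeout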
diff --git a/.venv/Lib/site-packages/eventlet/green/selectors.py b/.venv/Lib/site-packages/eventlet/green/selectors.py
new file mode 100644
index 0000000..81fc862
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/green/selectors.py
@@ -0,0 +1,34 @@
+import sys
+
+from eventlet import patcher
+from eventlet.green import select
+
+__patched__ = [
+ 'DefaultSelector',
+ 'SelectSelector',
+]
+
+# We only have green select so the options are:
+# * leave it be and have selectors that block
+# * try to pretend the "bad" selectors don't exist
+# * replace all with SelectSelector at the price of possibly different
+# performance characteristics and a missing fileno() method (if someone
+# uses it, it'll result in a crash; we may want to implement it in the future)
+#
+# This module used to follow the third approach, but simply removing the
+# offending selectors is a less error-prone and less confusing approach.
+__deleted__ = [
+ 'PollSelector',
+ 'EpollSelector',
+ 'DevpollSelector',
+ 'KqueueSelector',
+]
+
+patcher.inject('selectors', globals(), ('select', select))
+
+del patcher
+
+if sys.platform != 'win32':
+ SelectSelector._select = staticmethod(select.select)
+
+DefaultSelector = SelectSelector
diff --git a/.venv/Lib/site-packages/eventlet/green/socket.py b/.venv/Lib/site-packages/eventlet/green/socket.py
new file mode 100644
index 0000000..6a39caf
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/green/socket.py
@@ -0,0 +1,63 @@
+import os
+import sys
+
+__import__('eventlet.green._socket_nodns')
+__socket = sys.modules['eventlet.green._socket_nodns']
+
+__all__ = __socket.__all__
+__patched__ = __socket.__patched__ + [
+ 'create_connection',
+ 'getaddrinfo',
+ 'gethostbyname',
+ 'gethostbyname_ex',
+ 'getnameinfo',
+]
+
+from eventlet.patcher import slurp_properties
+slurp_properties(__socket, globals(), srckeys=dir(__socket))
+
+
+if os.environ.get("EVENTLET_NO_GREENDNS", '').lower() != 'yes':
+ from eventlet.support import greendns
+ gethostbyname = greendns.gethostbyname
+ getaddrinfo = greendns.getaddrinfo
+ gethostbyname_ex = greendns.gethostbyname_ex
+ getnameinfo = greendns.getnameinfo
+ del greendns
+
+
+def create_connection(address,
+ timeout=_GLOBAL_DEFAULT_TIMEOUT,
+ source_address=None):
+ """Connect to *address* and return the socket object.
+
+ Convenience function. Connect to *address* (a 2-tuple ``(host,
+ port)``) and return the socket object. Passing the optional
+ *timeout* parameter will set the timeout on the socket instance
+ before attempting to connect. If no *timeout* is supplied, the
+ global default timeout setting returned by :func:`getdefaulttimeout`
+ is used.
+ """
+
+ err = "getaddrinfo returns an empty list"
+ host, port = address
+ for res in getaddrinfo(host, port, 0, SOCK_STREAM):
+ af, socktype, proto, canonname, sa = res
+ sock = None
+ try:
+ sock = socket(af, socktype, proto)
+ if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
+ sock.settimeout(timeout)
+ if source_address:
+ sock.bind(source_address)
+ sock.connect(sa)
+ return sock
+
+ except error as e:
+ err = e
+ if sock is not None:
+ sock.close()
+
+ if not isinstance(err, error):
+ err = error(err)
+ raise err
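+# Usage matches the stdlib helper this shadows; a sketch:
+#
+#     sock = create_connection(('example.com', 80), timeout=5)
+#     sock.sendall(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')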
diff --git a/.venv/Lib/site-packages/eventlet/green/ssl.py b/.venv/Lib/site-packages/eventlet/green/ssl.py
new file mode 100644
index 0000000..7ceb3c7
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/green/ssl.py
@@ -0,0 +1,487 @@
+__ssl = __import__('ssl')
+
+from eventlet.patcher import slurp_properties
+slurp_properties(__ssl, globals(), srckeys=dir(__ssl))
+
+import sys
+from eventlet import greenio, hubs
+from eventlet.greenio import (
+ GreenSocket, CONNECT_ERR, CONNECT_SUCCESS,
+)
+from eventlet.hubs import trampoline, IOClosed
+from eventlet.support import get_errno, PY33
+from contextlib import contextmanager
+
+orig_socket = __import__('socket')
+socket = orig_socket.socket
+timeout_exc = orig_socket.timeout
+
+__patched__ = [
+ 'SSLSocket', 'SSLContext', 'wrap_socket', 'sslwrap_simple',
+ 'create_default_context', '_create_default_https_context']
+
+_original_sslsocket = __ssl.SSLSocket
+_original_sslcontext = __ssl.SSLContext
+_is_py_3_7 = sys.version_info[:2] == (3, 7)
+_original_wrap_socket = __ssl.SSLContext.wrap_socket
+
+
+@contextmanager
+def _original_ssl_context(*args, **kwargs):
+ tmp_sslcontext = _original_wrap_socket.__globals__.get('SSLContext', None)
+ tmp_sslsocket = _original_sslsocket._create.__globals__.get('SSLSocket', None)
+ _original_sslsocket._create.__globals__['SSLSocket'] = _original_sslsocket
+ _original_wrap_socket.__globals__['SSLContext'] = _original_sslcontext
+ try:
+ yield
+ finally:
+ _original_wrap_socket.__globals__['SSLContext'] = tmp_sslcontext
+ _original_sslsocket._create.__globals__['SSLSocket'] = tmp_sslsocket
+
+
+class GreenSSLSocket(_original_sslsocket):
+ """ This is a green version of the SSLSocket class from the ssl module added
+ in 2.6. For documentation on it, please see the Python standard
+ documentation.
+
+ Python nonblocking ssl objects don't give errors when the other end
+ of the socket is closed (they do notice when the other end is shutdown,
+ though). Any write/read operations will simply hang if the socket is
+ closed from the other end. There is no obvious fix for this problem;
+ it appears to be a limitation of Python's ssl object implementation.
+ A workaround is to set a reasonable timeout on the socket using
+ settimeout(), and to close/reopen the connection when a timeout
+ occurs at an unexpected juncture in the code.
+ """
+ def __new__(cls, sock=None, keyfile=None, certfile=None,
+ server_side=False, cert_reqs=CERT_NONE,
+ ssl_version=PROTOCOL_TLS, ca_certs=None,
+ do_handshake_on_connect=True, *args, **kw):
+ if not isinstance(sock, GreenSocket):
+ sock = GreenSocket(sock)
+ with _original_ssl_context():
+ context = kw.get('_context')
+ if context:
+ ret = _original_sslsocket._create(
+ sock=sock.fd,
+ server_side=server_side,
+ do_handshake_on_connect=False,
+ suppress_ragged_eofs=kw.get('suppress_ragged_eofs', True),
+ server_hostname=kw.get('server_hostname'),
+ context=context,
+ session=kw.get('session'),
+ )
+ else:
+ ret = cls._wrap_socket(
+ sock=sock.fd,
+ keyfile=keyfile,
+ certfile=certfile,
+ server_side=server_side,
+ cert_reqs=cert_reqs,
+ ssl_version=ssl_version,
+ ca_certs=ca_certs,
+ do_handshake_on_connect=False,
+ ciphers=kw.get('ciphers'),
+ )
+ ret.keyfile = keyfile
+ ret.certfile = certfile
+ ret.cert_reqs = cert_reqs
+ ret.ssl_version = ssl_version
+ ret.ca_certs = ca_certs
+ ret.__class__ = GreenSSLSocket
+ return ret
+
+ @staticmethod
+ def _wrap_socket(sock, keyfile, certfile, server_side, cert_reqs,
+ ssl_version, ca_certs, do_handshake_on_connect, ciphers):
+ context = _original_sslcontext(protocol=ssl_version)
+ context.options |= cert_reqs
+ if certfile or keyfile:
+ context.load_cert_chain(
+ certfile=certfile,
+ keyfile=keyfile,
+ )
+ if ca_certs:
+ context.load_verify_locations(ca_certs)
+ if ciphers:
+ context.set_ciphers(ciphers)
+ return context.wrap_socket(
+ sock=sock,
+ server_side=server_side,
+ do_handshake_on_connect=do_handshake_on_connect,
+ )
+
+ # we are inheriting from SSLSocket because its constructor calls
+ # do_handshake whose behavior we wish to override
+ def __init__(self, sock, keyfile=None, certfile=None,
+ server_side=False, cert_reqs=CERT_NONE,
+ ssl_version=PROTOCOL_TLS, ca_certs=None,
+ do_handshake_on_connect=True, *args, **kw):
+ if not isinstance(sock, GreenSocket):
+ sock = GreenSocket(sock)
+ self.act_non_blocking = sock.act_non_blocking
+
+ # the superclass initializer trashes the methods so we remove
+ # the local-object versions of them and let the actual class
+ # methods shine through
+ # Note: This for Python 2
+ try:
+ for fn in orig_socket._delegate_methods:
+ delattr(self, fn)
+ except AttributeError:
+ pass
+
+ # Python 3 SSLSocket construction process overwrites the timeout so restore it
+ self._timeout = sock.gettimeout()
+
+ # it also sets timeout to None internally apparently (tested with 3.4.2)
+ _original_sslsocket.settimeout(self, 0.0)
+ assert _original_sslsocket.gettimeout(self) == 0.0
+
+ # see note above about handshaking
+ self.do_handshake_on_connect = do_handshake_on_connect
+ if do_handshake_on_connect and self._connected:
+ self.do_handshake()
+
+ def settimeout(self, timeout):
+ self._timeout = timeout
+
+ def gettimeout(self):
+ return self._timeout
+
+ def setblocking(self, flag):
+ if flag:
+ self.act_non_blocking = False
+ self._timeout = None
+ else:
+ self.act_non_blocking = True
+ self._timeout = 0.0
+
+ def _call_trampolining(self, func, *a, **kw):
+ if self.act_non_blocking:
+ return func(*a, **kw)
+ else:
+ while True:
+ try:
+ return func(*a, **kw)
+ except SSLError as exc:
+ if get_errno(exc) == SSL_ERROR_WANT_READ:
+ trampoline(self,
+ read=True,
+ timeout=self.gettimeout(),
+ timeout_exc=timeout_exc('timed out'))
+ elif get_errno(exc) == SSL_ERROR_WANT_WRITE:
+ trampoline(self,
+ write=True,
+ timeout=self.gettimeout(),
+ timeout_exc=timeout_exc('timed out'))
+ elif _is_py_3_7 and "unexpected eof" in exc.args[1]:
+ # For reasons I don't understand, on 3.7 we get
+ # [SSL: KRB5_S_TKT_NYV] "unexpected eof while reading"
+ # errors...
+ raise IOClosed
+ else:
+ raise
+
+ def write(self, data):
+ """Write DATA to the underlying SSL channel. Returns
+ number of bytes of DATA actually transmitted."""
+ return self._call_trampolining(
+ super().write, data)
+
+ def read(self, len=1024, buffer=None):
+ """Read up to LEN bytes and return them.
+ Return zero-length string on EOF."""
+ try:
+ return self._call_trampolining(
+ super().read, len, buffer)
+ except IOClosed:
+ if buffer is None:
+ return b''
+ else:
+ return 0
+
+ def send(self, data, flags=0):
+ if self._sslobj:
+ return self._call_trampolining(
+ super().send, data, flags)
+ else:
+ trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
+ return socket.send(self, data, flags)
+
+ def sendto(self, data, addr, flags=0):
+ # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
+ if self._sslobj:
+ raise ValueError("sendto not allowed on instances of %s" %
+ self.__class__)
+ else:
+ trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
+ return socket.sendto(self, data, addr, flags)
+
+ def sendall(self, data, flags=0):
+ # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
+ if self._sslobj:
+ if flags != 0:
+ raise ValueError(
+ "non-zero flags not allowed in calls to sendall() on %s" %
+ self.__class__)
+ amount = len(data)
+ count = 0
+ data_to_send = data
+ while (count < amount):
+ v = self.send(data_to_send)
+ count += v
+ if v == 0:
+ trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
+ else:
+ data_to_send = data[count:]
+ return amount
+ else:
+ while True:
+ try:
+ return socket.sendall(self, data, flags)
+ except orig_socket.error as e:
+ if self.act_non_blocking:
+ raise
+ erno = get_errno(e)
+ if erno in greenio.SOCKET_BLOCKING:
+ trampoline(self, write=True,
+ timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
+ elif erno in greenio.SOCKET_CLOSED:
+ return ''
+ raise
+
+ def recv(self, buflen=1024, flags=0):
+ return self._base_recv(buflen, flags, into=False)
+
+ def recv_into(self, buffer, nbytes=None, flags=0):
+ # Copied verbatim from CPython
+ if buffer and nbytes is None:
+ nbytes = len(buffer)
+ elif nbytes is None:
+ nbytes = 1024
+ # end of CPython code
+
+ return self._base_recv(nbytes, flags, into=True, buffer_=buffer)
+
+ def _base_recv(self, nbytes, flags, into, buffer_=None):
+ if into:
+ plain_socket_function = socket.recv_into
+ else:
+ plain_socket_function = socket.recv
+
+ # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
+ if self._sslobj:
+ if flags != 0:
+ raise ValueError(
+ "non-zero flags not allowed in calls to %s() on %s" %
+ (plain_socket_function.__name__, self.__class__))
+ if into:
+ read = self.read(nbytes, buffer_)
+ else:
+ read = self.read(nbytes)
+ return read
+ else:
+ while True:
+ try:
+ args = [self, nbytes, flags]
+ if into:
+ args.insert(1, buffer_)
+ return plain_socket_function(*args)
+ except orig_socket.error as e:
+ if self.act_non_blocking:
+ raise
+ erno = get_errno(e)
+ if erno in greenio.SOCKET_BLOCKING:
+ try:
+ trampoline(
+ self, read=True,
+ timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
+ except IOClosed:
+ return b''
+ elif erno in greenio.SOCKET_CLOSED:
+ return b''
+ raise
+
+ def recvfrom(self, addr, buflen=1024, flags=0):
+ if not self.act_non_blocking:
+ trampoline(self, read=True, timeout=self.gettimeout(),
+ timeout_exc=timeout_exc('timed out'))
+ return super().recvfrom(addr, buflen, flags)
+
+ def recvfrom_into(self, buffer, nbytes=None, flags=0):
+ if not self.act_non_blocking:
+ trampoline(self, read=True, timeout=self.gettimeout(),
+ timeout_exc=timeout_exc('timed out'))
+ return super().recvfrom_into(buffer, nbytes, flags)
+
+ def unwrap(self):
+ return GreenSocket(self._call_trampolining(
+ super().unwrap))
+
+ def do_handshake(self):
+ """Perform a TLS/SSL handshake."""
+ return self._call_trampolining(
+ super().do_handshake)
+
+ def _socket_connect(self, addr):
+ real_connect = socket.connect
+ if self.act_non_blocking:
+ return real_connect(self, addr)
+ else:
+ clock = hubs.get_hub().clock
+ # *NOTE: gross, copied code from greenio because it's not factored
+ # well enough to reuse
+ if self.gettimeout() is None:
+ while True:
+ try:
+ return real_connect(self, addr)
+ except orig_socket.error as exc:
+ if get_errno(exc) in CONNECT_ERR:
+ trampoline(self, write=True)
+ elif get_errno(exc) in CONNECT_SUCCESS:
+ return
+ else:
+ raise
+ else:
+ end = clock() + self.gettimeout()
+ while True:
+ try:
+ real_connect(self, addr)
+ except orig_socket.error as exc:
+ if get_errno(exc) in CONNECT_ERR:
+ trampoline(
+ self, write=True,
+ timeout=end - clock(), timeout_exc=timeout_exc('timed out'))
+ elif get_errno(exc) in CONNECT_SUCCESS:
+ return
+ else:
+ raise
+ if clock() >= end:
+ raise timeout_exc('timed out')
+
+ def connect(self, addr):
+ """Connects to remote ADDR, and then wraps the connection in
+ an SSL channel."""
+ # *NOTE: grrrrr copied this code from ssl.py because of the reference
+ # to socket.connect which we don't want to call directly
+ if self._sslobj:
+ raise ValueError("attempt to connect already-connected SSLSocket!")
+ self._socket_connect(addr)
+ server_side = False
+ try:
+ sslwrap = _ssl.sslwrap
+ except AttributeError:
+ # sslwrap was removed in 3.x and later in 2.7.9
+ context = self.context if PY33 else self._context
+ sslobj = context._wrap_socket(self, server_side, server_hostname=self.server_hostname)
+ else:
+ sslobj = sslwrap(self._sock, server_side, self.keyfile, self.certfile,
+ self.cert_reqs, self.ssl_version,
+ self.ca_certs, *self.ciphers)
+
+ # SSLObject was added in Python 3.5, http://bugs.python.org/issue21965;
+ # with or without it, the wrapped object is stored directly.
+ self._sslobj = sslobj
+
+ if self.do_handshake_on_connect:
+ self.do_handshake()
+
+ def accept(self):
+ """Accepts a new connection from a remote client, and returns
+ a tuple containing that new connection wrapped with a server-side
+ SSL channel, and the address of the remote client."""
+ # RDW grr duplication of code from greenio
+ if self.act_non_blocking:
+ newsock, addr = socket.accept(self)
+ else:
+ while True:
+ try:
+ newsock, addr = socket.accept(self)
+ break
+ except orig_socket.error as e:
+ if get_errno(e) not in greenio.SOCKET_BLOCKING:
+ raise
+ trampoline(self, read=True, timeout=self.gettimeout(),
+ timeout_exc=timeout_exc('timed out'))
+
+ new_ssl = type(self)(
+ newsock,
+ server_side=True,
+ do_handshake_on_connect=False,
+ suppress_ragged_eofs=self.suppress_ragged_eofs,
+ _context=self._context,
+ )
+ return (new_ssl, addr)
+
+ def dup(self):
+ raise NotImplementedError("Can't dup an ssl object")
+
+
+SSLSocket = GreenSSLSocket
+
+
+def wrap_socket(sock, *a, **kw):
+ return GreenSSLSocket(sock, *a, **kw)
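+# A minimal usage sketch (illustrative only; 'example.com' is a placeholder
+# host). wrap_socket() returns a GreenSSLSocket, so the handshake and I/O
+# yield to the hub instead of blocking the OS thread:
+#
+#   from eventlet.green import socket as green_socket
+#   from eventlet.green import ssl as green_ssl
+#
+#   raw = green_socket.socket()
+#   raw.connect(('example.com', 443))
+#   tls = green_ssl.wrap_socket(raw)
+#   tls.send(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
+#   print(tls.recv(1024))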
+
+
+class GreenSSLContext(_original_sslcontext):
+ __slots__ = ()
+
+ def wrap_socket(self, sock, *a, **kw):
+ return GreenSSLSocket(sock, *a, _context=self, **kw)
+
+ # https://github.com/eventlet/eventlet/issues/371
+ # Thanks to Gevent developers for sharing patch to this problem.
+ if hasattr(_original_sslcontext.options, 'setter'):
+ # In 3.6, these became properties. They want to access the
+ # property __set__ method in the superclass, and they do so by using
+ # super(SSLContext, SSLContext). But we rebind SSLContext when we monkey
+ # patch, which causes infinite recursion.
+ # https://github.com/python/cpython/commit/328067c468f82e4ec1b5c510a4e84509e010f296
+ @_original_sslcontext.options.setter
+ def options(self, value):
+ super(_original_sslcontext, _original_sslcontext).options.__set__(self, value)
+
+ @_original_sslcontext.verify_flags.setter
+ def verify_flags(self, value):
+ super(_original_sslcontext, _original_sslcontext).verify_flags.__set__(self, value)
+
+ @_original_sslcontext.verify_mode.setter
+ def verify_mode(self, value):
+ super(_original_sslcontext, _original_sslcontext).verify_mode.__set__(self, value)
+
+ if hasattr(_original_sslcontext, "maximum_version"):
+ @_original_sslcontext.maximum_version.setter
+ def maximum_version(self, value):
+ super(_original_sslcontext, _original_sslcontext).maximum_version.__set__(self, value)
+
+ if hasattr(_original_sslcontext, "minimum_version"):
+ @_original_sslcontext.minimum_version.setter
+ def minimum_version(self, value):
+ super(_original_sslcontext, _original_sslcontext).minimum_version.__set__(self, value)
+
+
+SSLContext = GreenSSLContext
+
+
+# TODO: ssl.create_default_context() was added in 2.7.9.
+# Not clear we're still trying to support Python versions even older than that.
+if hasattr(__ssl, 'create_default_context'):
+ _original_create_default_context = __ssl.create_default_context
+
+ def green_create_default_context(*a, **kw):
+ # We can't just monkey-patch the green version of `wrap_socket`
+ # onto SSLContext instances, and SSLContext.create_default_context
+ # does a bunch of work. Rather than re-implementing it all, just
+ # switch out the __class__ to get our `wrap_socket` implementation.
+ context = _original_create_default_context(*a, **kw)
+ context.__class__ = GreenSSLContext
+ return context
+
+ create_default_context = green_create_default_context
+ _create_default_https_context = green_create_default_context
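+# A minimal usage sketch (illustrative only; 'example.com' is a placeholder
+# host): the green create_default_context() builds a normal default context,
+# then swaps in GreenSSLContext so wrap_socket() produces green sockets:
+#
+#   from eventlet.green import socket, ssl
+#
+#   ctx = ssl.create_default_context()
+#   raw = socket.socket()
+#   raw.connect(('example.com', 443))
+#   tls = ctx.wrap_socket(raw, server_hostname='example.com')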
diff --git a/.venv/Lib/site-packages/eventlet/green/subprocess.py b/.venv/Lib/site-packages/eventlet/green/subprocess.py
new file mode 100644
index 0000000..4509208
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/green/subprocess.py
@@ -0,0 +1,137 @@
+import errno
+import sys
+from types import FunctionType
+
+import eventlet
+from eventlet import greenio
+from eventlet import patcher
+from eventlet.green import select, threading, time
+
+
+__patched__ = ['call', 'check_call', 'Popen']
+to_patch = [('select', select), ('threading', threading), ('time', time)]
+
+from eventlet.green import selectors
+to_patch.append(('selectors', selectors))
+
+patcher.inject('subprocess', globals(), *to_patch)
+subprocess_orig = patcher.original("subprocess")
+subprocess_imported = sys.modules.get('subprocess', subprocess_orig)
+mswindows = sys.platform == "win32"
+
+
+if getattr(subprocess_orig, 'TimeoutExpired', None) is None:
+ # Backported from Python 3.3.
+ # https://bitbucket.org/eventlet/eventlet/issue/89
+ class TimeoutExpired(Exception):
+ """This exception is raised when the timeout expires while waiting for
+ a child process.
+ """
+
+ def __init__(self, cmd, timeout, output=None):
+ self.cmd = cmd
+ self.timeout = timeout
+ self.output = output
+
+ def __str__(self):
+ return ("Command '%s' timed out after %s seconds" %
+ (self.cmd, self.timeout))
+else:
+ TimeoutExpired = subprocess_imported.TimeoutExpired
+
+
+# This is the meat of this module, the green version of Popen.
+class Popen(subprocess_orig.Popen):
+ """eventlet-friendly version of subprocess.Popen"""
+ # We do not believe that Windows pipes support non-blocking I/O. At least,
+ # the Python file objects stored on our base-class object have no
+ # setblocking() method, and the Python fcntl module doesn't exist on
+ # Windows. (see eventlet.greenio.set_nonblocking()) As the sole purpose of
+ # this __init__() override is to wrap the pipes for eventlet-friendly
+ # non-blocking I/O, don't even bother overriding it on Windows.
+ if not mswindows:
+ def __init__(self, args, bufsize=0, *argss, **kwds):
+ self.args = args
+ # Forward the call to base-class constructor
+ subprocess_orig.Popen.__init__(self, args, 0, *argss, **kwds)
+ # Now wrap the pipes, if any. This logic is loosely borrowed from
+ # eventlet.processes.Process.run() method.
+ for attr in "stdin", "stdout", "stderr":
+ pipe = getattr(self, attr)
+ if pipe is not None and type(pipe) != greenio.GreenPipe:
+ # https://github.com/eventlet/eventlet/issues/243
+ # AttributeError: '_io.TextIOWrapper' object has no attribute 'mode'
+ mode = getattr(pipe, 'mode', '')
+ if not mode:
+ if pipe.readable():
+ mode += 'r'
+ if pipe.writable():
+ mode += 'w'
+ # ValueError: can't have unbuffered text I/O
+ if bufsize == 0:
+ bufsize = -1
+ wrapped_pipe = greenio.GreenPipe(pipe, mode, bufsize)
+ setattr(self, attr, wrapped_pipe)
+ __init__.__doc__ = subprocess_orig.Popen.__init__.__doc__
+
+ def wait(self, timeout=None, check_interval=0.01):
+ # Instead of a blocking OS call, this version of wait() uses logic
+ # borrowed from the eventlet 0.2 processes.Process.wait() method.
+ if timeout is not None:
+ endtime = time.time() + timeout
+ try:
+ while True:
+ status = self.poll()
+ if status is not None:
+ return status
+ if timeout is not None and time.time() > endtime:
+ raise TimeoutExpired(self.args, timeout)
+ eventlet.sleep(check_interval)
+ except OSError as e:
+ if e.errno == errno.ECHILD:
+ # no child process, this happens if the child process
+ # already died and has been cleaned up
+ return -1
+ else:
+ raise
+ wait.__doc__ = subprocess_orig.Popen.wait.__doc__
+
+ if not mswindows:
+ # don't want to rewrite the original _communicate() method, we
+ # just want a version that uses eventlet.green.select.select()
+ # instead of select.select().
+ _communicate = FunctionType(
+ subprocess_orig.Popen._communicate.__code__,
+ globals())
+ try:
+ _communicate_with_select = FunctionType(
+ subprocess_orig.Popen._communicate_with_select.__code__,
+ globals())
+ _communicate_with_poll = FunctionType(
+ subprocess_orig.Popen._communicate_with_poll.__code__,
+ globals())
+ except AttributeError:
+ pass
+
+
+# Borrow subprocess.call() and check_call(), but patch them so they reference
+# OUR Popen class rather than subprocess.Popen.
+def patched_function(function):
+ new_function = FunctionType(function.__code__, globals())
+ new_function.__kwdefaults__ = function.__kwdefaults__
+ new_function.__defaults__ = function.__defaults__
+ return new_function
+
+
+call = patched_function(subprocess_orig.call)
+check_call = patched_function(subprocess_orig.check_call)
+# check_output is Python 2.7+
+if hasattr(subprocess_orig, 'check_output'):
+ __patched__.append('check_output')
+ check_output = patched_function(subprocess_orig.check_output)
+del patched_function
+
+# Keep exceptions identity.
+# https://github.com/eventlet/eventlet/issues/413
+CalledProcessError = subprocess_imported.CalledProcessError
+del subprocess_imported
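+# A minimal usage sketch (illustrative only): this Popen cooperates with other
+# greenthreads, and wait() polls the child instead of blocking the hub:
+#
+#   from eventlet.green import subprocess
+#
+#   p = subprocess.Popen(['echo', 'hello'], stdout=subprocess.PIPE)
+#   out, _ = p.communicate()
+#   assert p.wait(timeout=5) == 0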
diff --git a/.venv/Lib/site-packages/eventlet/green/thread.py b/.venv/Lib/site-packages/eventlet/green/thread.py
new file mode 100644
index 0000000..053a1c3
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/green/thread.py
@@ -0,0 +1,120 @@
+"""Implements the standard thread module, using greenthreads."""
+import _thread as __thread
+from eventlet.support import greenlets as greenlet
+from eventlet import greenthread
+from eventlet.lock import Lock
+import sys
+
+
+__patched__ = ['get_ident', 'start_new_thread', 'start_new', 'allocate_lock',
+ 'allocate', 'exit', 'interrupt_main', 'stack_size', '_local',
+ 'LockType', 'Lock', '_count']
+
+error = __thread.error
+LockType = Lock
+__threadcount = 0
+
+if hasattr(__thread, "_is_main_interpreter"):
+ _is_main_interpreter = __thread._is_main_interpreter
+
+
+def _set_sentinel():
+ # TODO: this is dummy code; reimplementing it may be needed:
+ # https://hg.python.org/cpython/file/b5e9bc4352e1/Modules/_threadmodule.c#l1203
+ return allocate_lock()
+
+
+TIMEOUT_MAX = __thread.TIMEOUT_MAX
+
+
+def _count():
+ return __threadcount
+
+
+def get_ident(gr=None):
+ if gr is None:
+ return id(greenlet.getcurrent())
+ else:
+ return id(gr)
+
+
+def __thread_body(func, args, kwargs):
+ global __threadcount
+ __threadcount += 1
+ try:
+ func(*args, **kwargs)
+ finally:
+ __threadcount -= 1
+
+
+def start_new_thread(function, args=(), kwargs=None):
+ if (sys.version_info >= (3, 4)
+ and getattr(function, '__module__', '') == 'threading'
+ and hasattr(function, '__self__')):
+ # Since Python 3.4, threading.Thread uses an internal lock
+ # automatically released when the python thread state is deleted.
+ # With monkey patching, eventlet uses green threads without python
+ # thread state, so the lock is not automatically released.
+ #
+ # Wrap _bootstrap_inner() to explicitly release the thread state lock
+ # when the thread completes.
+ thread = function.__self__
+ bootstrap_inner = thread._bootstrap_inner
+
+ def wrap_bootstrap_inner():
+ try:
+ bootstrap_inner()
+ finally:
+ # The lock can be cleared (ex: by a fork())
+ if thread._tstate_lock is not None:
+ thread._tstate_lock.release()
+
+ thread._bootstrap_inner = wrap_bootstrap_inner
+
+ kwargs = kwargs or {}
+ g = greenthread.spawn_n(__thread_body, function, args, kwargs)
+ return get_ident(g)
+
+
+start_new = start_new_thread
+
+
+def allocate_lock(*a):
+ return LockType(1)
+
+
+allocate = allocate_lock
+
+
+def exit():
+ raise greenlet.GreenletExit
+
+
+exit_thread = __thread.exit_thread
+
+
+def interrupt_main():
+ curr = greenlet.getcurrent()
+ if curr.parent and not curr.parent.dead:
+ curr.parent.throw(KeyboardInterrupt())
+ else:
+ raise KeyboardInterrupt()
+
+
+if hasattr(__thread, 'stack_size'):
+ __original_stack_size__ = __thread.stack_size
+
+ def stack_size(size=None):
+ if size is None:
+ return __original_stack_size__()
+ if size > __original_stack_size__():
+ return __original_stack_size__(size)
+ else:
+ # not going to decrease stack_size, because otherwise other greenlets
+ # in this thread will suffer
+ pass
+
+from eventlet.corolocal import local as _local
+
+if hasattr(__thread, 'daemon_threads_allowed'):
+ daemon_threads_allowed = __thread.daemon_threads_allowed
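+# A minimal usage sketch (illustrative only): start_new_thread() spawns a
+# greenthread rather than an OS thread, and get_ident() returns the id of the
+# current greenlet:
+#
+#   import eventlet
+#   from eventlet.green import thread
+#
+#   def worker(n):
+#       print('worker', n, 'ident', thread.get_ident())
+#
+#   thread.start_new_thread(worker, (1,))
+#   eventlet.sleep(0)   # yield so the worker greenthread gets to run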
diff --git a/.venv/Lib/site-packages/eventlet/green/threading.py b/.venv/Lib/site-packages/eventlet/green/threading.py
new file mode 100644
index 0000000..7ea20cd
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/green/threading.py
@@ -0,0 +1,131 @@
+"""Implements the standard threading module, using greenthreads."""
+import eventlet
+from eventlet.green import thread
+from eventlet.green import time
+from eventlet.support import greenlets as greenlet
+
+__patched__ = ['_start_new_thread', '_allocate_lock',
+ '_sleep', 'local', 'stack_size', 'Lock', 'currentThread',
+ 'current_thread', '_after_fork', '_shutdown']
+
+__patched__ += ['get_ident', '_set_sentinel']
+
+__orig_threading = eventlet.patcher.original('threading')
+__threadlocal = __orig_threading.local()
+__patched_enumerate = None
+
+
+eventlet.patcher.inject(
+ 'threading',
+ globals(),
+ ('_thread', thread),
+ ('time', time))
+
+
+_count = 1
+
+
+class _GreenThread:
+ """Wrapper for GreenThread objects to provide Thread-like attributes
+ and methods"""
+
+ def __init__(self, g):
+ global _count
+ self._g = g
+ self._name = 'GreenThread-%d' % _count
+ _count += 1
+
+ def __repr__(self):
+ return '<_GreenThread(%s, %r)>' % (self._name, self._g)
+
+ def join(self, timeout=None):
+ return self._g.wait()
+
+ def getName(self):
+ return self._name
+ get_name = getName
+
+ def setName(self, name):
+ self._name = str(name)
+ set_name = setName
+
+ name = property(getName, setName)
+
+ ident = property(lambda self: id(self._g))
+
+ def isAlive(self):
+ return True
+ is_alive = isAlive
+
+ daemon = property(lambda self: True)
+
+ def isDaemon(self):
+ return self.daemon
+ is_daemon = isDaemon
+
+
+__threading = None
+
+
+def _fixup_thread(t):
+ # Some third-party packages (lockfile) will try to patch the
+ # threading.Thread class with a get_name attribute if it doesn't
+ # exist. Since we might return Thread objects from the original
+ # threading package that won't get patched, let's make sure each
+ # individual object gets patched too, if our patched threading.Thread
+ # class has been patched. This is why monkey patching can be bad...
+ global __threading
+ if not __threading:
+ __threading = __import__('threading')
+
+ if (hasattr(__threading.Thread, 'get_name') and
+ not hasattr(t, 'get_name')):
+ t.get_name = t.getName
+ return t
+
+
+def current_thread():
+ global __patched_enumerate
+ g = greenlet.getcurrent()
+ if not g:
+ # Not currently in a greenthread, fall back to standard function
+ return _fixup_thread(__orig_threading.current_thread())
+
+ try:
+ active = __threadlocal.active
+ except AttributeError:
+ active = __threadlocal.active = {}
+
+ g_id = id(g)
+ t = active.get(g_id)
+ if t is not None:
+ return t
+
+ # FIXME: move import from function body to top
+ # (jaketesler@github) Furthermore, I was unable to have the current_thread() return correct results from
+ # threading.enumerate() unless the enumerate() function was a) imported at runtime using the gross __import__() call
+ # and b) was hot-patched using patch_function().
+ # https://github.com/eventlet/eventlet/issues/172#issuecomment-379421165
+ if __patched_enumerate is None:
+ __patched_enumerate = eventlet.patcher.patch_function(__import__('threading').enumerate)
+ found = [th for th in __patched_enumerate() if th.ident == g_id]
+ if found:
+ return found[0]
+
+ # Add green thread to active if we can clean it up on exit
+ def cleanup(g):
+ del active[g_id]
+ try:
+ g.link(cleanup)
+ except AttributeError:
+ # Not a GreenThread type, so there's no way to hook into
+ # the green thread exiting. Fall back to the standard
+ # function then.
+ t = _fixup_thread(__orig_threading.current_thread())
+ else:
+ t = active[g_id] = _GreenThread(g)
+
+ return t
+
+
+currentThread = current_thread
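+# A minimal usage sketch (illustrative only): inside an eventlet greenthread,
+# current_thread() returns a _GreenThread wrapper whose ident is the
+# greenlet's id:
+#
+#   import eventlet
+#   from eventlet.green import threading
+#
+#   def show():
+#       print(threading.current_thread().name)   # e.g. 'GreenThread-1'
+#
+#   eventlet.spawn(show).wait()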
diff --git a/.venv/Lib/site-packages/eventlet/green/time.py b/.venv/Lib/site-packages/eventlet/green/time.py
new file mode 100644
index 0000000..0fbe30e
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/green/time.py
@@ -0,0 +1,6 @@
+__time = __import__('time')
+from eventlet.patcher import slurp_properties
+__patched__ = ['sleep']
+slurp_properties(__time, globals(), ignore=__patched__, srckeys=dir(__time))
+from eventlet.greenthread import sleep
+sleep # silence pyflakes
diff --git a/.venv/Lib/site-packages/eventlet/green/urllib/__init__.py b/.venv/Lib/site-packages/eventlet/green/urllib/__init__.py
new file mode 100644
index 0000000..44335dd
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/green/urllib/__init__.py
@@ -0,0 +1,5 @@
+from eventlet import patcher
+from eventlet.green import socket
+from eventlet.green import time
+from eventlet.green import httplib
+from eventlet.green import ftplib
diff --git a/.venv/Lib/site-packages/eventlet/green/urllib/__pycache__/__init__.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/green/urllib/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..4c01e95
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/green/urllib/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/green/urllib/__pycache__/error.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/green/urllib/__pycache__/error.cpython-312.pyc
new file mode 100644
index 0000000..7735961
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/green/urllib/__pycache__/error.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/green/urllib/__pycache__/parse.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/green/urllib/__pycache__/parse.cpython-312.pyc
new file mode 100644
index 0000000..bc3cafd
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/green/urllib/__pycache__/parse.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/green/urllib/__pycache__/request.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/green/urllib/__pycache__/request.cpython-312.pyc
new file mode 100644
index 0000000..9f139e8
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/green/urllib/__pycache__/request.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/green/urllib/__pycache__/response.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/green/urllib/__pycache__/response.cpython-312.pyc
new file mode 100644
index 0000000..eb610ad
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/green/urllib/__pycache__/response.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/green/urllib/error.py b/.venv/Lib/site-packages/eventlet/green/urllib/error.py
new file mode 100644
index 0000000..6913813
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/green/urllib/error.py
@@ -0,0 +1,4 @@
+from eventlet import patcher
+from eventlet.green.urllib import response
+patcher.inject('urllib.error', globals(), ('urllib.response', response))
+del patcher
diff --git a/.venv/Lib/site-packages/eventlet/green/urllib/parse.py b/.venv/Lib/site-packages/eventlet/green/urllib/parse.py
new file mode 100644
index 0000000..f3a8924
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/green/urllib/parse.py
@@ -0,0 +1,3 @@
+from eventlet import patcher
+patcher.inject('urllib.parse', globals())
+del patcher
diff --git a/.venv/Lib/site-packages/eventlet/green/urllib/request.py b/.venv/Lib/site-packages/eventlet/green/urllib/request.py
new file mode 100644
index 0000000..dca7863
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/green/urllib/request.py
@@ -0,0 +1,50 @@
+from eventlet import patcher
+from eventlet.green import ftplib, http, os, socket, time
+from eventlet.green.http import client as http_client
+from eventlet.green.urllib import error, parse, response
+
+# TODO should we also have green email version?
+# import email
+
+
+to_patch = [
+ # This (http module) is needed here, otherwise test__greenness hangs
+ # forever on Python 3 because parts of non-green http (including
+ # http.client) leak into our patched urllib.request. There may be a nicer
+ # way to handle this (I didn't dig too deep) but this does the job. Jakub
+ ('http', http),
+
+ ('http.client', http_client),
+ ('os', os),
+ ('socket', socket),
+ ('time', time),
+ ('urllib.error', error),
+ ('urllib.parse', parse),
+ ('urllib.response', response),
+]
+
+try:
+ from eventlet.green import ssl
+except ImportError:
+ pass
+else:
+ to_patch.append(('ssl', ssl))
+
+patcher.inject('urllib.request', globals(), *to_patch)
+del to_patch
+
+to_patch_in_functions = [('ftplib', ftplib)]
+del ftplib
+
+FTPHandler.ftp_open = patcher.patch_function(FTPHandler.ftp_open, *to_patch_in_functions)
+URLopener.open_ftp = patcher.patch_function(URLopener.open_ftp, *to_patch_in_functions)
+
+ftperrors = patcher.patch_function(ftperrors, *to_patch_in_functions)
+
+ftpwrapper.init = patcher.patch_function(ftpwrapper.init, *to_patch_in_functions)
+ftpwrapper.retrfile = patcher.patch_function(ftpwrapper.retrfile, *to_patch_in_functions)
+
+del error
+del parse
+del response
+del to_patch_in_functions
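+# A minimal usage sketch (illustrative only; the URL is a placeholder): this
+# module is a drop-in urllib.request whose sockets yield to the hub:
+#
+#   from eventlet.green.urllib import request
+#
+#   with request.urlopen('http://example.com/') as resp:
+#       body = resp.read()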
diff --git a/.venv/Lib/site-packages/eventlet/green/urllib/response.py b/.venv/Lib/site-packages/eventlet/green/urllib/response.py
new file mode 100644
index 0000000..f9aaba5
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/green/urllib/response.py
@@ -0,0 +1,3 @@
+from eventlet import patcher
+patcher.inject('urllib.response', globals())
+del patcher
diff --git a/.venv/Lib/site-packages/eventlet/green/urllib2.py b/.venv/Lib/site-packages/eventlet/green/urllib2.py
new file mode 100644
index 0000000..c53ecbb
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/green/urllib2.py
@@ -0,0 +1,20 @@
+from eventlet import patcher
+from eventlet.green import ftplib
+from eventlet.green import httplib
+from eventlet.green import socket
+from eventlet.green import ssl
+from eventlet.green import time
+from eventlet.green import urllib
+
+patcher.inject(
+ 'urllib2',
+ globals(),
+ ('httplib', httplib),
+ ('socket', socket),
+ ('ssl', ssl),
+ ('time', time),
+ ('urllib', urllib))
+
+FTPHandler.ftp_open = patcher.patch_function(FTPHandler.ftp_open, ('ftplib', ftplib))
+
+del patcher
diff --git a/.venv/Lib/site-packages/eventlet/green/zmq.py b/.venv/Lib/site-packages/eventlet/green/zmq.py
new file mode 100644
index 0000000..865ee13
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/green/zmq.py
@@ -0,0 +1,465 @@
+"""The :mod:`zmq` module wraps the :class:`Socket` and :class:`Context`
+found in :mod:`pyzmq <zmq>` to be non-blocking.
+"""
+__zmq__ = __import__('zmq')
+import eventlet.hubs
+from eventlet.patcher import slurp_properties
+from eventlet.support import greenlets as greenlet
+
+__patched__ = ['Context', 'Socket']
+slurp_properties(__zmq__, globals(), ignore=__patched__)
+
+from collections import deque
+
+try:
+ # alias XREQ/XREP to DEALER/ROUTER if available
+ if not hasattr(__zmq__, 'XREQ'):
+ XREQ = DEALER
+ if not hasattr(__zmq__, 'XREP'):
+ XREP = ROUTER
+except NameError:
+ pass
+
+
+class LockReleaseError(Exception):
+ pass
+
+
+class _QueueLock:
+ """A Lock that can be acquired by at most one thread. Any other
+ thread calling acquire will be blocked in a queue. When release
+ is called, the threads are awoken in the order they blocked,
+ one at a time. This lock can be acquired recursively by the same
+ thread."""
+
+ def __init__(self):
+ self._waiters = deque()
+ self._count = 0
+ self._holder = None
+ self._hub = eventlet.hubs.get_hub()
+
+ def __nonzero__(self):
+ return bool(self._count)
+
+ __bool__ = __nonzero__
+
+ def __enter__(self):
+ self.acquire()
+
+ def __exit__(self, type, value, traceback):
+ self.release()
+
+ def acquire(self):
+ current = greenlet.getcurrent()
+ if (self._waiters or self._count > 0) and self._holder is not current:
+ # block until lock is free
+ self._waiters.append(current)
+ self._hub.switch()
+ w = self._waiters.popleft()
+
+ assert w is current, 'Waiting threads woken out of order'
+ assert self._count == 0, 'After waking a thread, the lock must be unacquired'
+
+ self._holder = current
+ self._count += 1
+
+ def release(self):
+ if self._count <= 0:
+ raise LockReleaseError("Cannot release unacquired lock")
+
+ self._count -= 1
+ if self._count == 0:
+ self._holder = None
+ if self._waiters:
+ # wake next
+ self._hub.schedule_call_global(0, self._waiters[0].switch)
+
+
+class _BlockedThread:
+ """Is either empty, or represents a single blocked thread that
+ blocked itself by calling the block() method. The thread can be
+ awoken by calling wake(). Wake() can be called multiple times and
+ all but the first call will have no effect."""
+
+ def __init__(self):
+ self._blocked_thread = None
+ self._wakeupper = None
+ self._hub = eventlet.hubs.get_hub()
+
+ def __nonzero__(self):
+ return self._blocked_thread is not None
+
+ __bool__ = __nonzero__
+
+ def block(self, deadline=None):
+ if self._blocked_thread is not None:
+ raise Exception("Cannot block more than one thread on one BlockedThread")
+ self._blocked_thread = greenlet.getcurrent()
+
+ if deadline is not None:
+ self._hub.schedule_call_local(deadline - self._hub.clock(), self.wake)
+
+ try:
+ self._hub.switch()
+ finally:
+ self._blocked_thread = None
+ # cleanup the wakeup task
+ if self._wakeupper is not None:
+ # Important to cancel the wakeup task so it doesn't
+ # spuriously wake this greenthread later on.
+ self._wakeupper.cancel()
+ self._wakeupper = None
+
+ def wake(self):
+ """Schedules the blocked thread to be awoken and return
+ True. If wake has already been called or if there is no
+ blocked thread, then this call has no effect and returns
+ False."""
+ if self._blocked_thread is not None and self._wakeupper is None:
+ self._wakeupper = self._hub.schedule_call_global(0, self._blocked_thread.switch)
+ return True
+ return False
+
+
+class Context(__zmq__.Context):
+ """Subclass of :class:`zmq.Context`
+ """
+
+ def socket(self, socket_type):
+ """Overridden method to ensure that the green version of socket is used
+
+ Behaves the same as :meth:`zmq.Context.socket`, but ensures
+ that a :class:`Socket` with all of its send and recv methods set to be
+ non-blocking is returned
+ """
+ if self.closed:
+ raise ZMQError(ENOTSUP)
+ return Socket(self, socket_type)
+
+
+def _wraps(source_fn):
+ """A decorator that copies the __name__ and __doc__ from the given
+ function
+ """
+ def wrapper(dest_fn):
+ dest_fn.__name__ = source_fn.__name__
+ dest_fn.__doc__ = source_fn.__doc__
+ return dest_fn
+ return wrapper
+
+
+# Implementation notes: Each socket in 0mq contains a pipe that the
+# background IO threads use to communicate with the socket. These
+# events are important because they tell the socket when it is able to
+# send and when it has messages waiting to be received. The read end
+# of the events pipe is the same FD that getsockopt(zmq.FD) returns.
+#
+# Events are read from the socket's event pipe only on the thread that
+# the 0mq context is associated with, which is the native thread the
+# greenthreads are running on, and the only operations that cause the
+# events to be read and processed are send(), recv() and
+# getsockopt(zmq.EVENTS). This means that after doing any of these
+# three operations, the ability of the socket to send or receive a
+# message without blocking may have changed, but after the events are
+# read the FD is no longer readable so the hub may not signal our
+# listener.
+#
+# If we understand that after calling send() a message might be ready
+# to be received and that after calling recv() a message might be able
+# to be sent, what should we do next? There are two approaches:
+#
+# 1. Always wake the other thread if there is one waiting. This
+# wakeup may be spurious because the socket might not actually be
+# ready for a send() or recv(). However, if a thread is in a
+# tight-loop successfully calling send() or recv() then the wakeups
+# are naturally batched and there's very little cost added to each
+# send/recv call.
+#
+# or
+#
+# 2. Call getsockopt(zmq.EVENTS) and explicitly check if the other
+# thread should be woken up. This avoids spurious wake-ups but may
+# add overhead because getsockopt will cause all events to be
+# processed, whereas send and recv throttle processing
+# events. Admittedly, all of the events will need to be processed
+# eventually, but it is likely faster to batch the processing.
+#
+# Which approach is better? I have no idea.
+#
+# TODO:
+# - Support MessageTrackers and make MessageTracker.wait green
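+#
+# The Socket class below takes approach 1: after each send() or recv() call
+# it wakes any greenthread blocked on the opposite operation, accepting the
+# occasional spurious wakeup.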
+
+_Socket = __zmq__.Socket
+_Socket_recv = _Socket.recv
+_Socket_send = _Socket.send
+_Socket_send_multipart = _Socket.send_multipart
+_Socket_recv_multipart = _Socket.recv_multipart
+_Socket_send_string = _Socket.send_string
+_Socket_recv_string = _Socket.recv_string
+_Socket_send_pyobj = _Socket.send_pyobj
+_Socket_recv_pyobj = _Socket.recv_pyobj
+_Socket_send_json = _Socket.send_json
+_Socket_recv_json = _Socket.recv_json
+_Socket_getsockopt = _Socket.getsockopt
+
+
+class Socket(_Socket):
+ """Green version of :class:``zmq.core.socket.Socket``.
+
+ The following three methods are always overridden:
+ * send
+ * recv
+ * getsockopt
+ To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
+ is deferred to the hub (using :func:``eventlet.hubs.trampoline``) if a
+ ``zmq.EAGAIN`` (retry) error is raised.
+
+ For some socket types, the following methods are also overridden:
+ * send_multipart
+ * recv_multipart
+ """
+
+ def __init__(self, context, socket_type):
+ super().__init__(context, socket_type)
+
+ self.__dict__['_eventlet_send_event'] = _BlockedThread()
+ self.__dict__['_eventlet_recv_event'] = _BlockedThread()
+ self.__dict__['_eventlet_send_lock'] = _QueueLock()
+ self.__dict__['_eventlet_recv_lock'] = _QueueLock()
+
+ def event(fd):
+ # Some events arrived at the zmq socket. This may mean
+ # there's a message that can be read or there's space for
+ # a message to be written.
+ send_wake = self._eventlet_send_event.wake()
+ recv_wake = self._eventlet_recv_event.wake()
+ if not send_wake and not recv_wake:
+ # if no waiting send or recv thread was woken up, then
+ # force the zmq socket's events to be processed to
+ # avoid repeated wakeups
+ _Socket_getsockopt(self, EVENTS)
+
+ hub = eventlet.hubs.get_hub()
+ self.__dict__['_eventlet_listener'] = hub.add(hub.READ,
+ self.getsockopt(FD),
+ event,
+ lambda _: None,
+ lambda: None)
+ self.__dict__['_eventlet_clock'] = hub.clock
+
+ @_wraps(_Socket.close)
+ def close(self, linger=None):
+ super().close(linger)
+ if self._eventlet_listener is not None:
+ eventlet.hubs.get_hub().remove(self._eventlet_listener)
+ self.__dict__['_eventlet_listener'] = None
+ # wake any blocked threads
+ self._eventlet_send_event.wake()
+ self._eventlet_recv_event.wake()
+
+ @_wraps(_Socket.getsockopt)
+ def getsockopt(self, option):
+ result = _Socket_getsockopt(self, option)
+ if option == EVENTS:
+ # Getting the events causes the zmq socket to process
+ # events which may mean a msg can be sent or received. If
+ # there is a greenthread blocked and waiting for events,
+ # it will miss the edge-triggered read event, so wake it
+ # up.
+ if (result & POLLOUT):
+ self._eventlet_send_event.wake()
+ if (result & POLLIN):
+ self._eventlet_recv_event.wake()
+ return result
+
+ @_wraps(_Socket.send)
+ def send(self, msg, flags=0, copy=True, track=False):
+ """A send method that's safe to use when multiple greenthreads
+ are calling send, send_multipart, recv and recv_multipart on
+ the same socket.
+ """
+ if flags & NOBLOCK:
+ result = _Socket_send(self, msg, flags, copy, track)
+ # Instead of calling both wake methods, could call
+ # self.getsockopt(EVENTS) which would trigger wakeups if
+ # needed.
+ self._eventlet_send_event.wake()
+ self._eventlet_recv_event.wake()
+ return result
+
+ # TODO: pyzmq will copy the message buffer and create Message
+ # objects under some circumstances. We could do that work here
+ # once to avoid doing it every time the send is retried.
+ flags |= NOBLOCK
+ with self._eventlet_send_lock:
+ while True:
+ try:
+ return _Socket_send(self, msg, flags, copy, track)
+ except ZMQError as e:
+ if e.errno == EAGAIN:
+ self._eventlet_send_event.block()
+ else:
+ raise
+ finally:
+ # The call to send processes 0mq events and may
+ # make the socket ready to recv. Wake the next
+ # receiver. (Could check EVENTS for POLLIN here)
+ self._eventlet_recv_event.wake()
+
+ @_wraps(_Socket.send_multipart)
+ def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
+ """A send_multipart method that's safe to use when multiple
+ greenthreads are calling send, send_multipart, recv and
+ recv_multipart on the same socket.
+ """
+ if flags & NOBLOCK:
+ return _Socket_send_multipart(self, msg_parts, flags, copy, track)
+
+ # acquire lock here so the subsequent calls to send for the
+ # message parts after the first don't block
+ with self._eventlet_send_lock:
+ return _Socket_send_multipart(self, msg_parts, flags, copy, track)
+
+ @_wraps(_Socket.send_string)
+ def send_string(self, u, flags=0, copy=True, encoding='utf-8'):
+ """A send_string method that's safe to use when multiple
+ greenthreads are calling send, send_string, recv and
+ recv_string on the same socket.
+ """
+ if flags & NOBLOCK:
+ return _Socket_send_string(self, u, flags, copy, encoding)
+
+ # acquire lock here so the subsequent calls to send for the
+ # message parts after the first don't block
+ with self._eventlet_send_lock:
+ return _Socket_send_string(self, u, flags, copy, encoding)
+
+ @_wraps(_Socket.send_pyobj)
+ def send_pyobj(self, obj, flags=0, protocol=2):
+ """A send_pyobj method that's safe to use when multiple
+ greenthreads are calling send, send_pyobj, recv and
+ recv_pyobj on the same socket.
+ """
+ if flags & NOBLOCK:
+ return _Socket_send_pyobj(self, obj, flags, protocol)
+
+ # acquire lock here so the subsequent calls to send for the
+ # message parts after the first don't block
+ with self._eventlet_send_lock:
+ return _Socket_send_pyobj(self, obj, flags, protocol)
+
+ @_wraps(_Socket.send_json)
+ def send_json(self, obj, flags=0, **kwargs):
+ """A send_json method that's safe to use when multiple
+ greenthreads are calling send, send_json, recv and
+ recv_json on the same socket.
+ """
+ if flags & NOBLOCK:
+ return _Socket_send_json(self, obj, flags, **kwargs)
+
+ # acquire lock here so the subsequent calls to send for the
+ # message parts after the first don't block
+ with self._eventlet_send_lock:
+ return _Socket_send_json(self, obj, flags, **kwargs)
+
+ @_wraps(_Socket.recv)
+ def recv(self, flags=0, copy=True, track=False):
+ """A recv method that's safe to use when multiple greenthreads
+ are calling send, send_multipart, recv and recv_multipart on
+ the same socket.
+ """
+ if flags & NOBLOCK:
+ msg = _Socket_recv(self, flags, copy, track)
+ # Instead of calling both wake methods, could call
+ # self.getsockopt(EVENTS) which would trigger wakeups if
+ # needed.
+ self._eventlet_send_event.wake()
+ self._eventlet_recv_event.wake()
+ return msg
+
+ deadline = None
+ if hasattr(__zmq__, 'RCVTIMEO'):
+ sock_timeout = self.getsockopt(__zmq__.RCVTIMEO)
+ if sock_timeout == -1:
+ pass
+ elif sock_timeout > 0:
+ deadline = self._eventlet_clock() + sock_timeout / 1000.0
+ else:
+ raise ValueError(sock_timeout)
+
+ flags |= NOBLOCK
+ with self._eventlet_recv_lock:
+ while True:
+ try:
+ return _Socket_recv(self, flags, copy, track)
+ except ZMQError as e:
+ if e.errno == EAGAIN:
+ # zmq in its wisdom decided to reuse EAGAIN for timeouts
+ if deadline is not None and self._eventlet_clock() > deadline:
+ e.is_timeout = True
+ raise
+
+ self._eventlet_recv_event.block(deadline=deadline)
+ else:
+ raise
+ finally:
+ # The call to recv processes 0mq events and may
+ # make the socket ready to send. Wake the next
+ # sender. (Could check EVENTS for POLLOUT here)
+ self._eventlet_send_event.wake()
+
+ @_wraps(_Socket.recv_multipart)
+ def recv_multipart(self, flags=0, copy=True, track=False):
+ """A recv_multipart method that's safe to use when multiple
+ greenthreads are calling send, send_multipart, recv and
+ recv_multipart on the same socket.
+ """
+ if flags & NOBLOCK:
+ return _Socket_recv_multipart(self, flags, copy, track)
+
+ # acquire lock here so the subsequent calls to recv for the
+ # message parts after the first don't block
+ with self._eventlet_recv_lock:
+ return _Socket_recv_multipart(self, flags, copy, track)
+
+ @_wraps(_Socket.recv_string)
+ def recv_string(self, flags=0, encoding='utf-8'):
+ """A recv_string method that's safe to use when multiple
+ greenthreads are calling send, send_string, recv and
+ recv_string on the same socket.
+ """
+ if flags & NOBLOCK:
+ return _Socket_recv_string(self, flags, encoding)
+
+ # acquire lock here so the subsequent calls to recv for the
+ # message parts after the first don't block
+ with self._eventlet_recv_lock:
+ return _Socket_recv_string(self, flags, encoding)
+
+ @_wraps(_Socket.recv_json)
+ def recv_json(self, flags=0, **kwargs):
+ """A recv_json method that's safe to use when multiple
+ greenthreads are calling send, send_json, recv and
+ recv_json on the same socket.
+ """
+ if flags & NOBLOCK:
+ return _Socket_recv_json(self, flags, **kwargs)
+
+ # acquire lock here so the subsequent calls to recv for the
+ # message parts after the first don't block
+ with self._eventlet_recv_lock:
+ return _Socket_recv_json(self, flags, **kwargs)
+
+ @_wraps(_Socket.recv_pyobj)
+ def recv_pyobj(self, flags=0):
+ """A recv_pyobj method that's safe to use when multiple
+ greenthreads are calling send, send_pyobj, recv and
+ recv_pyobj on the same socket.
+ """
+ if flags & NOBLOCK:
+ return _Socket_recv_pyobj(self, flags)
+
+ # acquire lock here so the subsequent calls to recv for the
+ # message parts after the first don't block
+ with self._eventlet_recv_lock:
+ return _Socket_recv_pyobj(self, flags)
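+# A minimal usage sketch (illustrative only): a green REQ/REP pair in one
+# process; blocked sends/receives yield to the hub rather than the OS thread:
+#
+#   import eventlet
+#   from eventlet.green import zmq
+#
+#   ctx = zmq.Context()
+#
+#   def server():
+#       rep = ctx.socket(zmq.REP)
+#       rep.bind('inproc://demo')
+#       rep.send(rep.recv())          # echo one message back
+#
+#   eventlet.spawn(server)
+#   eventlet.sleep(0)                 # let the server bind first
+#   req = ctx.socket(zmq.REQ)
+#   req.connect('inproc://demo')
+#   req.send(b'ping')
+#   print(req.recv())                 # b'ping'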
diff --git a/.venv/Lib/site-packages/eventlet/greenio/__init__.py b/.venv/Lib/site-packages/eventlet/greenio/__init__.py
new file mode 100644
index 0000000..513c4a5
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/greenio/__init__.py
@@ -0,0 +1,3 @@
+from eventlet.greenio.base import * # noqa
+
+from eventlet.greenio.py3 import * # noqa
diff --git a/.venv/Lib/site-packages/eventlet/greenio/__pycache__/__init__.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/greenio/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..3121867
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/greenio/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/greenio/__pycache__/base.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/greenio/__pycache__/base.cpython-312.pyc
new file mode 100644
index 0000000..57e7372
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/greenio/__pycache__/base.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/greenio/__pycache__/py3.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/greenio/__pycache__/py3.cpython-312.pyc
new file mode 100644
index 0000000..eead79c
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/greenio/__pycache__/py3.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/greenio/base.py b/.venv/Lib/site-packages/eventlet/greenio/base.py
new file mode 100644
index 0000000..d216a71
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/greenio/base.py
@@ -0,0 +1,492 @@
+import errno
+import os
+import socket
+import sys
+import time
+import warnings
+
+import eventlet
+from eventlet.hubs import trampoline, notify_opened, IOClosed
+from eventlet.support import get_errno
+
+__all__ = [
+ 'GreenSocket', '_GLOBAL_DEFAULT_TIMEOUT', 'set_nonblocking',
+ 'SOCKET_BLOCKING', 'SOCKET_CLOSED', 'CONNECT_ERR', 'CONNECT_SUCCESS',
+ 'shutdown_safe', 'SSL',
+ 'socket_timeout',
+]
+
+BUFFER_SIZE = 4096
+CONNECT_ERR = {errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK}
+CONNECT_SUCCESS = {0, errno.EISCONN}
+if sys.platform[:3] == "win":
+ CONNECT_ERR.add(errno.WSAEINVAL) # Bug 67
+
+_original_socket = eventlet.patcher.original('socket').socket
+
+
+if sys.version_info >= (3, 10):
+ socket_timeout = socket.timeout # Really, TimeoutError
+else:
+ socket_timeout = eventlet.timeout.wrap_is_timeout(socket.timeout)
+
+
+def socket_connect(descriptor, address):
+ """
+ Attempts to connect to the address, returns the descriptor if it succeeds,
+ returns None if it needs to trampoline, and raises any exceptions.
+ """
+ err = descriptor.connect_ex(address)
+ if err in CONNECT_ERR:
+ return None
+ if err not in CONNECT_SUCCESS:
+ raise OSError(err, errno.errorcode[err])
+ return descriptor
+
+
+def socket_checkerr(descriptor):
+ err = descriptor.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+ if err not in CONNECT_SUCCESS:
+ raise OSError(err, errno.errorcode[err])
+
+
+def socket_accept(descriptor):
+ """
+ Attempts to accept() on the descriptor, returns a client,address tuple
+ if it succeeds; returns None if it needs to trampoline, and raises
+ any exceptions.
+ """
+ try:
+ return descriptor.accept()
+ except OSError as e:
+ if get_errno(e) == errno.EWOULDBLOCK:
+ return None
+ raise
+
+
+if sys.platform[:3] == "win":
+ # winsock sometimes throws ENOTCONN
+ SOCKET_BLOCKING = {errno.EAGAIN, errno.EWOULDBLOCK}
+ SOCKET_CLOSED = {errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN}
+else:
+ # oddly, on linux/darwin, an unconnected socket is expected to block,
+ # so we treat ENOTCONN the same as EWOULDBLOCK
+ SOCKET_BLOCKING = {errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOTCONN}
+ SOCKET_CLOSED = {errno.ECONNRESET, errno.ESHUTDOWN, errno.EPIPE}
+
+
+def set_nonblocking(fd):
+ """
+ Sets the descriptor to be nonblocking. Works on many file-like
+ objects as well as sockets. Only sockets can be nonblocking on
+ Windows, however.
+ """
+ try:
+ setblocking = fd.setblocking
+ except AttributeError:
+ # fd has no setblocking() method. It could be that this version of
+ # Python predates socket.setblocking(). In that case, we can still set
+ # the flag "by hand" on the underlying OS fileno using the fcntl
+ # module.
+ try:
+ import fcntl
+ except ImportError:
+ # Whoops, Windows has no fcntl module. This might not be a socket
+ # at all, but rather a file-like object with no setblocking()
+ # method. In particular, on Windows, pipes don't support
+ # non-blocking I/O and therefore don't have that method. Which
+ # means fcntl wouldn't help even if we could load it.
+ raise NotImplementedError("set_nonblocking() on a file object "
+ "with no setblocking() method "
+ "(Windows pipes don't support non-blocking I/O)")
+ # We managed to import fcntl.
+ fileno = fd.fileno()
+ orig_flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
+ new_flags = orig_flags | os.O_NONBLOCK
+ if new_flags != orig_flags:
+ fcntl.fcntl(fileno, fcntl.F_SETFL, new_flags)
+ else:
+ # socket supports setblocking()
+ setblocking(0)
+
+
+try:
+ from socket import _GLOBAL_DEFAULT_TIMEOUT
+except ImportError:
+ _GLOBAL_DEFAULT_TIMEOUT = object()
+
+
+class GreenSocket:
+ """
+ Green version of the socket.socket class, intended to be 100%
+ API-compatible.
+
+ It also recognizes the keyword parameter 'set_nonblocking=True'.
+ Pass False to indicate that the socket is already in non-blocking
+ mode, to save syscalls.
+ """
+
+ # This placeholder is to prevent __getattr__ from creating an infinite call loop
+ fd = None
+
+ def __init__(self, family=socket.AF_INET, *args, **kwargs):
+ should_set_nonblocking = kwargs.pop('set_nonblocking', True)
+ if isinstance(family, int):
+ fd = _original_socket(family, *args, **kwargs)
+ # Notify the hub that this is a newly-opened socket.
+ notify_opened(fd.fileno())
+ else:
+ fd = family
+
+ # import timeout from other socket, if it was there
+ try:
+ self._timeout = fd.gettimeout() or socket.getdefaulttimeout()
+ except AttributeError:
+ self._timeout = socket.getdefaulttimeout()
+
+ # Check fd.fileno() != -1 so that we don't call set_nonblocking() on a
+ # closed socket
+ if should_set_nonblocking and fd.fileno() != -1:
+ set_nonblocking(fd)
+ self.fd = fd
+ # when client calls setblocking(0) or settimeout(0) the socket must
+ # act non-blocking
+ self.act_non_blocking = False
+
+ # Copy some attributes from underlying real socket.
+ # This is the easiest way that I found to fix
+ # https://bitbucket.org/eventlet/eventlet/issue/136
+ # Only `getsockopt` is required to fix that issue, others
+ # are just premature optimization to save __getattr__ call.
+ self.bind = fd.bind
+ self.close = fd.close
+ self.fileno = fd.fileno
+ self.getsockname = fd.getsockname
+ self.getsockopt = fd.getsockopt
+ self.listen = fd.listen
+ self.setsockopt = fd.setsockopt
+ self.shutdown = fd.shutdown
+ self._closed = False
+
+ @property
+ def _sock(self):
+ return self
+
+ def _get_io_refs(self):
+ return self.fd._io_refs
+
+ def _set_io_refs(self, value):
+ self.fd._io_refs = value
+
+ _io_refs = property(_get_io_refs, _set_io_refs)
+
+ # Forward unknown attributes to fd, caching the value for future use.
+ # I do not see any simple attribute which could be changed,
+ # so caching everything in self is fine.
+ # If we find such attributes, only attributes having __get__ should be cached.
+ # For now I do not want to complicate it.
+ def __getattr__(self, name):
+ if self.fd is None:
+ raise AttributeError(name)
+ attr = getattr(self.fd, name)
+ setattr(self, name, attr)
+ return attr
+
+ def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None):
+ """ We need to trampoline via the event hub.
+ We catch any signal back from the hub indicating that the operation we
+ were waiting on was associated with a filehandle that's since been
+ invalidated.
+ """
+ if self._closed:
+ # If we did any logging, alerting to a second trampoline attempt on a closed
+ # socket here would be useful.
+ raise IOClosed()
+ try:
+ return trampoline(fd, read=read, write=write, timeout=timeout,
+ timeout_exc=timeout_exc,
+ mark_as_closed=self._mark_as_closed)
+ except IOClosed:
+ # This socket's been obsoleted. De-fang it.
+ self._mark_as_closed()
+ raise
+
+ def accept(self):
+ if self.act_non_blocking:
+ res = self.fd.accept()
+ notify_opened(res[0].fileno())
+ return res
+ fd = self.fd
+ _timeout_exc = socket_timeout('timed out')
+ while True:
+ res = socket_accept(fd)
+ if res is not None:
+ client, addr = res
+ notify_opened(client.fileno())
+ set_nonblocking(client)
+ return type(self)(client), addr
+ self._trampoline(fd, read=True, timeout=self.gettimeout(), timeout_exc=_timeout_exc)
+
+ def _mark_as_closed(self):
+ """ Mark this socket as being closed """
+ self._closed = True
+
+ def __del__(self):
+ # This is in case self.close is not assigned yet (currently the constructor does it)
+ close = getattr(self, 'close', None)
+ if close is not None:
+ close()
+
+ def connect(self, address):
+ if self.act_non_blocking:
+ return self.fd.connect(address)
+ fd = self.fd
+ _timeout_exc = socket_timeout('timed out')
+ if self.gettimeout() is None:
+ while not socket_connect(fd, address):
+ try:
+ self._trampoline(fd, write=True)
+ except IOClosed:
+ raise OSError(errno.EBADFD)
+ socket_checkerr(fd)
+ else:
+ end = time.time() + self.gettimeout()
+ while True:
+ if socket_connect(fd, address):
+ return
+ if time.time() >= end:
+ raise _timeout_exc
+ timeout = end - time.time()
+ try:
+ self._trampoline(fd, write=True, timeout=timeout, timeout_exc=_timeout_exc)
+ except IOClosed:
+ # ... we need some workable errno here.
+ raise OSError(errno.EBADFD)
+ socket_checkerr(fd)
+
+ def connect_ex(self, address):
+ if self.act_non_blocking:
+ return self.fd.connect_ex(address)
+ fd = self.fd
+ if self.gettimeout() is None:
+ while not socket_connect(fd, address):
+ try:
+ self._trampoline(fd, write=True)
+ socket_checkerr(fd)
+ except OSError as ex:
+ return get_errno(ex)
+ except IOClosed:
+ return errno.EBADFD
+ return 0
+ else:
+ end = time.time() + self.gettimeout()
+ timeout_exc = socket.timeout(errno.EAGAIN)
+ while True:
+ try:
+ if socket_connect(fd, address):
+ return 0
+ if time.time() >= end:
+ raise timeout_exc
+ self._trampoline(fd, write=True, timeout=end - time.time(),
+ timeout_exc=timeout_exc)
+ socket_checkerr(fd)
+ except OSError as ex:
+ return get_errno(ex)
+ except IOClosed:
+ return errno.EBADFD
+ return 0
+
+ def dup(self, *args, **kw):
+ sock = self.fd.dup(*args, **kw)
+ newsock = type(self)(sock, set_nonblocking=False)
+ newsock.settimeout(self.gettimeout())
+ return newsock
+
+ def makefile(self, *args, **kwargs):
+ return _original_socket.makefile(self, *args, **kwargs)
+
+ def makeGreenFile(self, *args, **kw):
+ warnings.warn("makeGreenFile has been deprecated, please use "
+ "makefile instead", DeprecationWarning, stacklevel=2)
+ return self.makefile(*args, **kw)
+
+ def _read_trampoline(self):
+ self._trampoline(
+ self.fd,
+ read=True,
+ timeout=self.gettimeout(),
+ timeout_exc=socket_timeout('timed out'))
+
+ def _recv_loop(self, recv_meth, empty_val, *args):
+ if self.act_non_blocking:
+ return recv_meth(*args)
+
+ while True:
+ try:
+ # recv: bufsize=0?
+ # recv_into: buffer is empty?
+ # This is needed because behind the scenes we use sockets in
+ # nonblocking mode and builtin recv* methods. Attempting to read
+ # 0 bytes from a nonblocking socket using a builtin recv* method
+ # does not raise a timeout exception. Since we're simulating
+ # a blocking socket here we need to produce a timeout exception
+ # if needed, hence the call to trampoline.
+ if not args[0]:
+ self._read_trampoline()
+ return recv_meth(*args)
+ except OSError as e:
+ if get_errno(e) in SOCKET_BLOCKING:
+ pass
+ elif get_errno(e) in SOCKET_CLOSED:
+ return empty_val
+ else:
+ raise
+
+ try:
+ self._read_trampoline()
+ except IOClosed as e:
+ # Perhaps we should return '' instead?
+ raise EOFError()
+
+ def recv(self, bufsize, flags=0):
+ return self._recv_loop(self.fd.recv, b'', bufsize, flags)
+
+ def recvfrom(self, bufsize, flags=0):
+ return self._recv_loop(self.fd.recvfrom, b'', bufsize, flags)
+
+ def recv_into(self, buffer, nbytes=0, flags=0):
+ return self._recv_loop(self.fd.recv_into, 0, buffer, nbytes, flags)
+
+ def recvfrom_into(self, buffer, nbytes=0, flags=0):
+ return self._recv_loop(self.fd.recvfrom_into, 0, buffer, nbytes, flags)
+
+ def _send_loop(self, send_method, data, *args):
+ if self.act_non_blocking:
+ return send_method(data, *args)
+
+ _timeout_exc = socket_timeout('timed out')
+ while True:
+ try:
+ return send_method(data, *args)
+ except OSError as e:
+ eno = get_errno(e)
+ if eno == errno.ENOTCONN or eno not in SOCKET_BLOCKING:
+ raise
+
+ try:
+ self._trampoline(self.fd, write=True, timeout=self.gettimeout(),
+ timeout_exc=_timeout_exc)
+ except IOClosed:
+ raise OSError(errno.ECONNRESET, 'Connection closed by another thread')
+
+ def send(self, data, flags=0):
+ return self._send_loop(self.fd.send, data, flags)
+
+ def sendto(self, data, *args):
+ return self._send_loop(self.fd.sendto, data, *args)
+
+ def sendall(self, data, flags=0):
+ tail = self.send(data, flags)
+ len_data = len(data)
+ while tail < len_data:
+ tail += self.send(data[tail:], flags)
+
+ def setblocking(self, flag):
+ if flag:
+ self.act_non_blocking = False
+ self._timeout = None
+ else:
+ self.act_non_blocking = True
+ self._timeout = 0.0
+
+ def settimeout(self, howlong):
+ if howlong is None or howlong == _GLOBAL_DEFAULT_TIMEOUT:
+ self.setblocking(True)
+ return
+ try:
+ f = howlong.__float__
+ except AttributeError:
+ raise TypeError('a float is required')
+ howlong = f()
+ if howlong < 0.0:
+ raise ValueError('Timeout value out of range')
+ if howlong == 0.0:
+ self.act_non_blocking = True
+ self._timeout = 0.0
+ else:
+ self.act_non_blocking = False
+ self._timeout = howlong
+
+ def gettimeout(self):
+ return self._timeout
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.close()
+
+ if "__pypy__" in sys.builtin_module_names:
+ def _reuse(self):
+ getattr(self.fd, '_sock', self.fd)._reuse()
+
+ def _drop(self):
+ getattr(self.fd, '_sock', self.fd)._drop()
+
+
+def _operation_on_closed_file(*args, **kwargs):
+ raise ValueError("I/O operation on closed file")
+
+
+greenpipe_doc = """
+ GreenPipe is a cooperative replacement for the file class.
+ It will cooperate on pipes. It will block on regular files.
+ Differences from the file class:
+ - mode is a r/w property. It should be r/o
+ - encoding property not implemented
+ - write/writelines will not raise TypeError when non-string data is written;
+ they will write str(data) instead
+ - universal newlines are not supported and the newlines property is not implemented
+ - the file argument can be a descriptor, file name or file object
+ """
+
+# import SSL module here so we can refer to greenio.SSL.exceptionclass
+try:
+ from OpenSSL import SSL
+except ImportError:
+ # pyOpenSSL not installed, define exceptions anyway for convenience
+ class SSL:
+ class WantWriteError(Exception):
+ pass
+
+ class WantReadError(Exception):
+ pass
+
+ class ZeroReturnError(Exception):
+ pass
+
+ class SysCallError(Exception):
+ pass
+
+
+def shutdown_safe(sock):
+ """Shuts down the socket. This is a convenience method for
+ code that wants to gracefully handle regular sockets, SSL.Connection
+ sockets from PyOpenSSL and ssl.SSLSocket objects from Python 2.7 interchangeably.
+ Both types of ssl socket require a shutdown() before close,
+ but they have different arity on their shutdown method.
+
+ Regular sockets don't need a shutdown before close, but it doesn't hurt.
+ """
+ try:
+ try:
+ # socket, ssl.SSLSocket
+ return sock.shutdown(socket.SHUT_RDWR)
+ except TypeError:
+ # SSL.Connection
+ return sock.shutdown()
+ except OSError as e:
+ # we don't care if the socket is already closed;
+ # this will often be the case in an http server context
+ if get_errno(e) not in (errno.ENOTCONN, errno.EBADF, errno.ENOTSOCK):
+ raise
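+# A usage sketch for shutdown_safe(); the address is illustrative only:
+#
+#   import socket
+#   sock = socket.create_connection(('example.com', 80))
+#   shutdown_safe(sock)   # safe for plain, pyOpenSSL and ssl sockets alike
+#   sock.close()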
diff --git a/.venv/Lib/site-packages/eventlet/greenio/py3.py b/.venv/Lib/site-packages/eventlet/greenio/py3.py
new file mode 100644
index 0000000..941f49c
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/greenio/py3.py
@@ -0,0 +1,219 @@
+import _pyio as _original_pyio
+import errno
+import os as _original_os
+import socket as _original_socket
+from io import (
+ BufferedRandom as _OriginalBufferedRandom,
+ BufferedReader as _OriginalBufferedReader,
+ BufferedWriter as _OriginalBufferedWriter,
+ DEFAULT_BUFFER_SIZE,
+ TextIOWrapper as _OriginalTextIOWrapper,
+ IOBase as _OriginalIOBase,
+)
+from types import FunctionType
+
+from eventlet.greenio.base import (
+ _operation_on_closed_file,
+ greenpipe_doc,
+ set_nonblocking,
+ SOCKET_BLOCKING,
+)
+from eventlet.hubs import notify_close, notify_opened, IOClosed, trampoline
+from eventlet.support import get_errno
+
+__all__ = ['_fileobject', 'GreenPipe']
+
+# TODO get rid of this; it is only a stand-in for the original _fileobject
+_fileobject = _original_socket.SocketIO
+
+# Large part of the following code is copied from the original
+# eventlet.greenio module
+
+
+class GreenFileIO(_OriginalIOBase):
+ def __init__(self, name, mode='r', closefd=True, opener=None):
+ if isinstance(name, int):
+ fileno = name
+ self._name = "" % fileno
+ else:
+ assert isinstance(name, str)
+ with open(name, mode) as fd:
+ self._name = fd.name
+ fileno = _original_os.dup(fd.fileno())
+
+ notify_opened(fileno)
+ self._fileno = fileno
+ self._mode = mode
+ self._closed = False
+ set_nonblocking(self)
+ self._seekable = None
+
+ @property
+ def closed(self):
+ return self._closed
+
+ def seekable(self):
+ if self._seekable is None:
+ try:
+ _original_os.lseek(self._fileno, 0, _original_os.SEEK_CUR)
+ except OSError as e:
+ if get_errno(e) == errno.ESPIPE:
+ self._seekable = False
+ else:
+ raise
+ else:
+ self._seekable = True
+
+ return self._seekable
+
+ def readable(self):
+ return 'r' in self._mode or '+' in self._mode
+
+ def writable(self):
+ return 'w' in self._mode or '+' in self._mode or 'a' in self._mode
+
+ def fileno(self):
+ return self._fileno
+
+ def read(self, size=-1):
+ if size == -1:
+ return self.readall()
+
+ while True:
+ try:
+ return _original_os.read(self._fileno, size)
+ except OSError as e:
+ if get_errno(e) not in SOCKET_BLOCKING:
+ raise OSError(*e.args)
+ self._trampoline(self, read=True)
+
+ def readall(self):
+ buf = []
+ while True:
+ try:
+ chunk = _original_os.read(self._fileno, DEFAULT_BUFFER_SIZE)
+ if chunk == b'':
+ return b''.join(buf)
+ buf.append(chunk)
+ except OSError as e:
+ if get_errno(e) not in SOCKET_BLOCKING:
+ raise OSError(*e.args)
+ self._trampoline(self, read=True)
+
+ def readinto(self, b):
+ up_to = len(b)
+ data = self.read(up_to)
+ bytes_read = len(data)
+ b[:bytes_read] = data
+ return bytes_read
+
+ def isatty(self):
+ try:
+ return _original_os.isatty(self.fileno())
+ except OSError as e:
+ raise OSError(*e.args)
+
+ def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None):
+ if self._closed:
+ # Don't trampoline if we're already closed.
+ raise IOClosed()
+ try:
+ return trampoline(fd, read=read, write=write, timeout=timeout,
+ timeout_exc=timeout_exc,
+ mark_as_closed=self._mark_as_closed)
+ except IOClosed:
+ # Our fileno has been obsoleted. Defang ourselves to
+ # prevent spurious closes.
+ self._mark_as_closed()
+ raise
+
+ def _mark_as_closed(self):
+ """ Mark this socket as being closed """
+ self._closed = True
+
+ def write(self, data):
+ view = memoryview(data)
+ datalen = len(data)
+ offset = 0
+ while offset < datalen:
+ try:
+ written = _original_os.write(self._fileno, view[offset:])
+ except OSError as e:
+ if get_errno(e) not in SOCKET_BLOCKING:
+ raise OSError(*e.args)
+ trampoline(self, write=True)
+ else:
+ offset += written
+ return offset
+
+ def close(self):
+ if not self._closed:
+ self._closed = True
+ _original_os.close(self._fileno)
+ notify_close(self._fileno)
+ for method in [
+ 'fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
+ 'readline', 'readlines', 'seek', 'tell', 'truncate',
+ 'write', 'xreadlines', '__iter__', '__next__', 'writelines']:
+ setattr(self, method, _operation_on_closed_file)
+
+ def truncate(self, size=-1):
+ if size is None:
+ size = -1
+ if size == -1:
+ size = self.tell()
+ try:
+ rv = _original_os.ftruncate(self._fileno, size)
+ except OSError as e:
+ raise OSError(*e.args)
+ else:
+ self.seek(size) # move position&clear buffer
+ return rv
+
+ def seek(self, offset, whence=_original_os.SEEK_SET):
+ try:
+ return _original_os.lseek(self._fileno, offset, whence)
+ except OSError as e:
+ raise OSError(*e.args)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.close()
+
+
+_open_environment = dict(globals())
+_open_environment.update(dict(
+ BufferedRandom=_OriginalBufferedRandom,
+ BufferedWriter=_OriginalBufferedWriter,
+ BufferedReader=_OriginalBufferedReader,
+ TextIOWrapper=_OriginalTextIOWrapper,
+ FileIO=GreenFileIO,
+ os=_original_os,
+))
+if hasattr(_original_pyio, 'text_encoding'):
+ _open_environment['text_encoding'] = _original_pyio.text_encoding
+
+_pyio_open = getattr(_original_pyio.open, '__wrapped__', _original_pyio.open)
+_open = FunctionType(
+ _pyio_open.__code__,
+ _open_environment,
+)
+
+
+def GreenPipe(name, mode="r", buffering=-1, encoding=None, errors=None,
+ newline=None, closefd=True, opener=None):
+ try:
+ fileno = name.fileno()
+ except AttributeError:
+ pass
+ else:
+ fileno = _original_os.dup(fileno)
+ name.close()
+ name = fileno
+
+ return _open(name, mode, buffering, encoding, errors, newline, closefd, opener)
+
+
+GreenPipe.__doc__ = greenpipe_doc
diff --git a/.venv/Lib/site-packages/eventlet/greenpool.py b/.venv/Lib/site-packages/eventlet/greenpool.py
new file mode 100644
index 0000000..c77df89
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/greenpool.py
@@ -0,0 +1,256 @@
+import traceback
+
+import eventlet
+from eventlet import queue
+from eventlet.support import greenlets as greenlet
+
+__all__ = ['GreenPool', 'GreenPile']
+
+DEBUG = True
+
+
+class GreenPool:
+ """The GreenPool class is a pool of green threads.
+ """
+
+ def __init__(self, size=1000):
+ try:
+ size = int(size)
+ except ValueError as e:
+ msg = 'GreenPool() expect size :: int, actual: {} {}'.format(type(size), str(e))
+ raise TypeError(msg)
+ if size < 0:
+ msg = 'GreenPool() expect size >= 0, actual: {}'.format(repr(size))
+ raise ValueError(msg)
+ self.size = size
+ self.coroutines_running = set()
+ self.sem = eventlet.Semaphore(size)
+ self.no_coros_running = eventlet.Event()
+
+ def resize(self, new_size):
+ """ Change the max number of greenthreads doing work at any given time.
+
+ If resize is called when there are more than *new_size* greenthreads
+ already working on tasks, they will be allowed to complete but no new
+ tasks will be allowed to get launched until enough greenthreads finish
+ their tasks to drop the overall quantity below *new_size*. Until
+ then, the return value of free() will be negative.
+ """
+ size_delta = new_size - self.size
+ self.sem.counter += size_delta
+ self.size = new_size
+
+ def running(self):
+ """ Returns the number of greenthreads that are currently executing
+ functions in the GreenPool."""
+ return len(self.coroutines_running)
+
+ def free(self):
+ """ Returns the number of greenthreads available for use.
+
+ If zero or less, the next call to :meth:`spawn` or :meth:`spawn_n` will
+ block the calling greenthread until a slot becomes available."""
+ return self.sem.counter
+
+ def spawn(self, function, *args, **kwargs):
+ """Run the *function* with its arguments in its own green thread.
+ Returns the :class:`GreenThread <eventlet.greenthread.GreenThread>`
+ object that is running the function, which can be used to retrieve the
+ results.
+
+ If the pool is currently at capacity, ``spawn`` will block until one of
+ the running greenthreads completes its task and frees up a slot.
+
+ This function is reentrant; *function* can call ``spawn`` on the same
+ pool without risk of deadlocking the whole thing.
+ """
+ # if reentering an empty pool, don't try to wait on a coroutine freeing
+ # itself -- instead, just execute in the current coroutine
+ current = eventlet.getcurrent()
+ if self.sem.locked() and current in self.coroutines_running:
+ # a bit hacky to use the GT without switching to it
+ gt = eventlet.greenthread.GreenThread(current)
+ gt.main(function, args, kwargs)
+ return gt
+ else:
+ self.sem.acquire()
+ gt = eventlet.spawn(function, *args, **kwargs)
+ if not self.coroutines_running:
+ self.no_coros_running = eventlet.Event()
+ self.coroutines_running.add(gt)
+ gt.link(self._spawn_done)
+ return gt
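+ # A sketch of typical pool usage; the worker below is illustrative:
+ #
+ #   import eventlet
+ #   pool = eventlet.GreenPool(size=2)
+ #   def square(n):
+ #       eventlet.sleep(0.1)   # stand-in for real I/O
+ #       return n * n
+ #   threads = [pool.spawn(square, i) for i in range(4)]
+ #   results = [gt.wait() for gt in threads]   # [0, 1, 4, 9]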
+
+ def _spawn_n_impl(self, func, args, kwargs, coro):
+ try:
+ try:
+ func(*args, **kwargs)
+ except (KeyboardInterrupt, SystemExit, greenlet.GreenletExit):
+ raise
+ except:
+ if DEBUG:
+ traceback.print_exc()
+ finally:
+ if coro is None:
+ return
+ else:
+ coro = eventlet.getcurrent()
+ self._spawn_done(coro)
+
+ def spawn_n(self, function, *args, **kwargs):
+ """Create a greenthread to run the *function*, the same as
+ :meth:`spawn`. The difference is that :meth:`spawn_n` returns
+ None; the results of *function* are not retrievable.
+ """
+ # if reentering an empty pool, don't try to wait on a coroutine freeing
+ # itself -- instead, just execute in the current coroutine
+ current = eventlet.getcurrent()
+ if self.sem.locked() and current in self.coroutines_running:
+ self._spawn_n_impl(function, args, kwargs, None)
+ else:
+ self.sem.acquire()
+ g = eventlet.spawn_n(
+ self._spawn_n_impl,
+ function, args, kwargs, True)
+ if not self.coroutines_running:
+ self.no_coros_running = eventlet.Event()
+ self.coroutines_running.add(g)
+
+ def waitall(self):
+ """Waits until all greenthreads in the pool are finished working."""
+ assert eventlet.getcurrent() not in self.coroutines_running, \
+ "Calling waitall() from within one of the " \
+ "GreenPool's greenthreads will never terminate."
+ if self.running():
+ self.no_coros_running.wait()
+
+ def _spawn_done(self, coro):
+ self.sem.release()
+ if coro is not None:
+ self.coroutines_running.remove(coro)
+ # if done processing (no more work is waiting for processing),
+ # we can finish off any waitall() calls that might be pending
+ if self.sem.balance == self.size:
+ self.no_coros_running.send(None)
+
+ def waiting(self):
+ """Return the number of greenthreads waiting to spawn.
+ """
+ if self.sem.balance < 0:
+ return -self.sem.balance
+ else:
+ return 0
+
+ def _do_map(self, func, it, gi):
+ for args in it:
+ gi.spawn(func, *args)
+ gi.done_spawning()
+
+ def starmap(self, function, iterable):
+ """This is the same as :func:`itertools.starmap`, except that *func* is
+ executed in a separate green thread for each item, with the concurrency
+ limited by the pool's size. In operation, starmap consumes a constant
+ amount of memory, proportional to the size of the pool, and is thus
+ suited for iterating over extremely long input lists.
+ """
+ if function is None:
+ function = lambda *a: a
+ # We use a whole separate greenthread so its spawn() calls can block
+ # without blocking OUR caller. On the other hand, we must assume that
+ # our caller will immediately start trying to iterate over whatever we
+ # return. If that were a GreenPile, our caller would always see an
+ # empty sequence because the hub hasn't even entered _do_map() yet --
+ # _do_map() hasn't had a chance to spawn a single greenthread on this
+ # GreenPool! A GreenMap is safe to use with different producer and
+ # consumer greenthreads, because it doesn't raise StopIteration until
+ # the producer has explicitly called done_spawning().
+ gi = GreenMap(self.size)
+ eventlet.spawn_n(self._do_map, function, iterable, gi)
+ return gi
+
+ def imap(self, function, *iterables):
+ """This is the same as :func:`itertools.imap`, and has the same
+ concurrency and memory behavior as :meth:`starmap`.
+
+ It's quite convenient for, e.g., farming out jobs from a file::
+
+ def worker(line):
+ return do_something(line)
+ pool = GreenPool()
+ for result in pool.imap(worker, open("filename", 'r')):
+ print(result)
+ """
+ return self.starmap(function, zip(*iterables))
+
+
+class GreenPile:
+ """GreenPile is an abstraction representing a bunch of I/O-related tasks.
+
+ Construct a GreenPile with an existing GreenPool object. The GreenPile will
+ then use that pool's concurrency as it processes its jobs. There can be
+ many GreenPiles associated with a single GreenPool.
+
+ A GreenPile can also be constructed standalone, not associated with any
+ GreenPool. To do this, construct it with an integer size parameter instead
+ of a GreenPool.
+
+ It is not advisable to iterate over a GreenPile in a different greenthread
+ than the one which is calling spawn. The iterator will exit early in that
+ situation.
+ """
+
+ def __init__(self, size_or_pool=1000):
+ if isinstance(size_or_pool, GreenPool):
+ self.pool = size_or_pool
+ else:
+ self.pool = GreenPool(size_or_pool)
+ self.waiters = queue.LightQueue()
+ self.counter = 0
+
+ def spawn(self, func, *args, **kw):
+ """Runs *func* in its own green thread, with the result available by
+ iterating over the GreenPile object."""
+ self.counter += 1
+ try:
+ gt = self.pool.spawn(func, *args, **kw)
+ self.waiters.put(gt)
+ except:
+ self.counter -= 1
+ raise
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ """Wait for the next result, suspending the current greenthread until it
+ is available. Raises StopIteration when there are no more results."""
+ if self.counter == 0:
+ raise StopIteration()
+ return self._next()
+ __next__ = next
+
+ def _next(self):
+ try:
+ return self.waiters.get().wait()
+ finally:
+ self.counter -= 1
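+ # A sketch of a GreenPile sharing one pool's concurrency; fetch() and
+ # urls are assumed placeholders:
+ #
+ #   pool = GreenPool(10)
+ #   pile = GreenPile(pool)
+ #   for url in urls:
+ #       pile.spawn(fetch, url)
+ #   results = list(pile)   # results arrive in spawn order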
+
+
+# this is identical to GreenPile but it blocks on spawn if the results
+# aren't consumed, and it doesn't generate its own StopIteration exception,
+# instead relying on the spawning process to send one in when it's done
+class GreenMap(GreenPile):
+ def __init__(self, size_or_pool):
+ super().__init__(size_or_pool)
+ self.waiters = queue.LightQueue(maxsize=self.pool.size)
+
+ def done_spawning(self):
+ self.spawn(lambda: StopIteration())
+
+ def next(self):
+ val = self._next()
+ if isinstance(val, StopIteration):
+ raise val
+ else:
+ return val
+ __next__ = next
diff --git a/.venv/Lib/site-packages/eventlet/greenthread.py b/.venv/Lib/site-packages/eventlet/greenthread.py
new file mode 100644
index 0000000..8041def
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/greenthread.py
@@ -0,0 +1,346 @@
+from collections import deque
+import sys
+
+from greenlet import GreenletExit
+
+from eventlet import event
+from eventlet import hubs
+from eventlet import support
+from eventlet import timeout
+from eventlet.hubs import timer
+from eventlet.support import greenlets as greenlet
+import warnings
+
+__all__ = ['getcurrent', 'sleep', 'spawn', 'spawn_n',
+ 'kill',
+ 'spawn_after', 'spawn_after_local', 'GreenThread']
+
+getcurrent = greenlet.getcurrent
+
+
+def sleep(seconds=0):
+ """Yield control to another eligible coroutine until at least *seconds* have
+ elapsed.
+
+ *seconds* may be specified as an integer, or a float if fractional seconds
+ are desired. Calling :func:`~greenthread.sleep` with *seconds* of 0 is the
+ canonical way of expressing a cooperative yield. For example, if one is
+ looping over a large list performing an expensive calculation without
+ calling any socket methods, it's a good idea to call ``sleep(0)``
+ occasionally; otherwise nothing else will run.
+ """
+ hub = hubs.get_hub()
+ current = getcurrent()
+ if hub.greenlet is current:
+ raise RuntimeError('do not call blocking functions from the mainloop')
+ timer = hub.schedule_call_global(seconds, current.switch)
+ try:
+ hub.switch()
+ finally:
+ timer.cancel()
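+# For example, a CPU-bound loop can stay cooperative by yielding every so
+# often; items and crunch() are placeholders:
+#
+#   for i, item in enumerate(items):
+#       crunch(item)
+#       if i % 100 == 0:
+#           sleep(0)   # give other greenthreads a turn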
+
+
+def spawn(func, *args, **kwargs):
+ """Create a greenthread to run ``func(*args, **kwargs)``. Returns a
+ :class:`GreenThread` object which you can use to get the results of the
+ call.
+
+ Execution control returns immediately to the caller; the created greenthread
+ is merely scheduled to be run at the next available opportunity.
+ Use :func:`spawn_after` to arrange for greenthreads to be spawned
+ after a finite delay.
+ """
+ hub = hubs.get_hub()
+ g = GreenThread(hub.greenlet)
+ hub.schedule_call_global(0, g.switch, func, args, kwargs)
+ return g
+
+
+def spawn_n(func, *args, **kwargs):
+ """Same as :func:`spawn`, but returns a ``greenlet`` object from
+ which it is not possible to retrieve either a return value or
+ whether it raised any exceptions. This is faster than
+ :func:`spawn`; it is fastest if there are no keyword arguments.
+
+ If an exception is raised in the function, spawn_n prints a stack
+ trace; the print can be disabled by calling
+ :func:`eventlet.debug.hub_exceptions` with False.
+ """
+ return _spawn_n(0, func, args, kwargs)[1]
+
+
+def spawn_after(seconds, func, *args, **kwargs):
+ """Spawns *func* after *seconds* have elapsed. It runs as scheduled even if
+ the current greenthread has completed.
+
+ *seconds* may be specified as an integer, or a float if fractional seconds
+ are desired. The *func* will be called with the given *args* and
+ keyword arguments *kwargs*, and will be executed within its own greenthread.
+
+ The return value of :func:`spawn_after` is a :class:`GreenThread` object,
+ which can be used to retrieve the results of the call.
+
+ To cancel the spawn and prevent *func* from being called,
+ call :meth:`GreenThread.cancel` on the return value of :func:`spawn_after`.
+ This will not abort the function if it's already started running, which is
+ generally the desired behavior. If terminating *func* regardless of whether
+ it's started or not is the desired behavior, call :meth:`GreenThread.kill`.
+ """
+ hub = hubs.get_hub()
+ g = GreenThread(hub.greenlet)
+ hub.schedule_call_global(seconds, g.switch, func, args, kwargs)
+ return g
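+# A sketch of the cancel-before-start pattern described above:
+#
+#   gt = spawn_after(5, print, 'too late')
+#   gt.cancel()   # a no-op once the function has started; use kill() then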
+
+
+def spawn_after_local(seconds, func, *args, **kwargs):
+ """Spawns *func* after *seconds* have elapsed. The function will NOT be
+ called if the current greenthread has exited.
+
+ *seconds* may be specified as an integer, or a float if fractional seconds
+ are desired. The *func* will be called with the given *args* and
+ keyword arguments *kwargs*, and will be executed within its own greenthread.
+
+ The return value of :func:`spawn_after_local` is a :class:`GreenThread` object,
+ which can be used to retrieve the results of the call.
+
+ To cancel the spawn and prevent *func* from being called,
+ call :meth:`GreenThread.cancel` on the return value. This will not abort the
+ function if it's already started running. If terminating *func* regardless
+ of whether it's started or not is the desired behavior, call
+ :meth:`GreenThread.kill`.
+ """
+ hub = hubs.get_hub()
+ g = GreenThread(hub.greenlet)
+ hub.schedule_call_local(seconds, g.switch, func, args, kwargs)
+ return g
+
+
+def call_after_global(seconds, func, *args, **kwargs):
+ warnings.warn(
+ "call_after_global is renamed to spawn_after, which"
+ "has the same signature and semantics (plus a bit extra). Please do a"
+ " quick search-and-replace on your codebase, thanks!",
+ DeprecationWarning, stacklevel=2)
+ return _spawn_n(seconds, func, args, kwargs)[0]
+
+
+def call_after_local(seconds, function, *args, **kwargs):
+ warnings.warn(
+ "call_after_local is renamed to spawn_after_local, which"
+ "has the same signature and semantics (plus a bit extra).",
+ DeprecationWarning, stacklevel=2)
+ hub = hubs.get_hub()
+ g = greenlet.greenlet(function, parent=hub.greenlet)
+ t = hub.schedule_call_local(seconds, g.switch, *args, **kwargs)
+ return t
+
+
+call_after = call_after_local
+
+
+def exc_after(seconds, *throw_args):
+ warnings.warn("Instead of exc_after, which is deprecated, use "
+ "Timeout(seconds, exception)",
+ DeprecationWarning, stacklevel=2)
+ if seconds is None: # dummy argument, do nothing
+ return timer.Timer(seconds, lambda: None)
+ hub = hubs.get_hub()
+ return hub.schedule_call_local(seconds, getcurrent().throw, *throw_args)
+
+
+# deprecate, remove
+TimeoutError, with_timeout = (
+ support.wrap_deprecated(old, new)(fun) for old, new, fun in (
+ ('greenthread.TimeoutError', 'Timeout', timeout.Timeout),
+ ('greenthread.with_timeout', 'with_timeout', timeout.with_timeout),
+ ))
+
+
+def _spawn_n(seconds, func, args, kwargs):
+ hub = hubs.get_hub()
+ g = greenlet.greenlet(func, parent=hub.greenlet)
+ t = hub.schedule_call_global(seconds, g.switch, *args, **kwargs)
+ return t, g
+
+
+class GreenThread(greenlet.greenlet):
+ """The GreenThread class is a type of Greenlet which has the additional
+ property of being able to retrieve the return value of the main function.
+ Do not construct GreenThread objects directly; call :func:`spawn` to get one.
+ """
+
+ def __init__(self, parent):
+ greenlet.greenlet.__init__(self, self.main, parent)
+ self._exit_event = event.Event()
+ self._resolving_links = False
+ self._exit_funcs = None
+
+ def __await__(self):
+ """
+ Enable ``GreenThread``s to be ``await``ed in ``async`` functions.
+ """
+ from eventlet.hubs.asyncio import Hub
+ hub = hubs.get_hub()
+ if not isinstance(hub, Hub):
+ raise RuntimeError(
+ "This API only works with eventlet's asyncio hub. "
+ + "To use it, set an EVENTLET_HUB=asyncio environment variable."
+ )
+
+ future = hub.loop.create_future()
+
+ # When the Future finishes, check if it was due to cancellation:
+ def got_future_result(future):
+ if future.cancelled() and not self.dead:
+ # GreenThread is still running, so kill it:
+ self.kill()
+
+ future.add_done_callback(got_future_result)
+
+ # When the GreenThread finishes, set its result on the Future:
+ def got_gthread_result(gthread):
+ if future.done():
+ # Can't set values any more.
+ return
+
+ try:
+ # Should return immediately:
+ result = gthread.wait()
+ future.set_result(result)
+ except GreenletExit:
+ future.cancel()
+ except BaseException as e:
+ future.set_exception(e)
+
+ self.link(got_gthread_result)
+
+ return future.__await__()
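+ # A sketch of awaiting a GreenThread from asyncio code; this is only
+ # meaningful under the asyncio hub (EVENTLET_HUB=asyncio), and
+ # some_helper is an assumed placeholder:
+ #
+ #   async def main():
+ #       gt = eventlet.spawn(some_helper)
+ #       result = await gt   # cancelling the await kills the greenthread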
+
+ def wait(self):
+ """ Returns the result of the main function of this GreenThread. If the
+ result is a normal return value, :meth:`wait` returns it. If it raised
+ an exception, :meth:`wait` will raise the same exception (though the
+ stack trace will unavoidably contain some frames from within the
+ greenthread module)."""
+ return self._exit_event.wait()
+
+ def link(self, func, *curried_args, **curried_kwargs):
+ """ Set up a function to be called with the results of the GreenThread.
+
+ The function must have the following signature::
+
+ def func(gt, [curried args/kwargs]):
+
+ When the GreenThread finishes its run, it calls *func* with itself
+ and with the `curried arguments <http://en.wikipedia.org/wiki/Currying>`_ supplied
+ at link-time. If the function wants to retrieve the result of the GreenThread,
+ it should call wait() on its first argument.
+
+ Note that *func* is called within execution context of
+ the GreenThread, so it is possible to interfere with other linked
+ functions by doing things like switching explicitly to another
+ greenthread.
+ """
+ if self._exit_funcs is None:
+ self._exit_funcs = deque()
+ self._exit_funcs.append((func, curried_args, curried_kwargs))
+ if self._exit_event.ready():
+ self._resolve_links()
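+ # A sketch of a link callback matching the signature above; work() is
+ # an assumed placeholder:
+ #
+ #   def on_done(gt, label):
+ #       try:
+ #           print(label, gt.wait())
+ #       except Exception as e:
+ #           print(label, 'failed:', e)
+ #
+ #   gt = eventlet.spawn(work)
+ #   gt.link(on_done, 'job-1')   # 'job-1' is the curried argument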
+
+ def unlink(self, func, *curried_args, **curried_kwargs):
+ """ remove linked function set by :meth:`link`
+
+ Remove successfully return True, otherwise False
+ """
+ if not self._exit_funcs:
+ return False
+ try:
+ self._exit_funcs.remove((func, curried_args, curried_kwargs))
+ return True
+ except ValueError:
+ return False
+
+ def main(self, function, args, kwargs):
+ try:
+ result = function(*args, **kwargs)
+ except:
+ self._exit_event.send_exception(*sys.exc_info())
+ self._resolve_links()
+ raise
+ else:
+ self._exit_event.send(result)
+ self._resolve_links()
+
+ def _resolve_links(self):
+ # ca and ckw are the curried function arguments
+ if self._resolving_links:
+ return
+ if not self._exit_funcs:
+ return
+ self._resolving_links = True
+ try:
+ while self._exit_funcs:
+ f, ca, ckw = self._exit_funcs.popleft()
+ f(self, *ca, **ckw)
+ finally:
+ self._resolving_links = False
+
+ def kill(self, *throw_args):
+ """Kills the greenthread using :func:`kill`. After being killed
+ all calls to :meth:`wait` will raise *throw_args* (which default
+ to :class:`greenlet.GreenletExit`)."""
+ return kill(self, *throw_args)
+
+ def cancel(self, *throw_args):
+ """Kills the greenthread using :func:`kill`, but only if it hasn't
+ already started running. After being canceled,
+ all calls to :meth:`wait` will raise *throw_args* (which default
+ to :class:`greenlet.GreenletExit`)."""
+ return cancel(self, *throw_args)
+
+
+def cancel(g, *throw_args):
+ """Like :func:`kill`, but only terminates the greenthread if it hasn't
+ already started execution. If the greenthread has already started
+ execution, :func:`cancel` has no effect."""
+ if not g:
+ kill(g, *throw_args)
+
+
+def kill(g, *throw_args):
+ """Terminates the target greenthread by raising an exception into it.
+ Whatever that greenthread might be doing, be it waiting for I/O or another
+ primitive, it sees an exception right away.
+
+ By default, this exception is GreenletExit, but a specific exception
+ may be specified. *throw_args* should be the same as the arguments to
+ raise; either an exception instance or an exc_info tuple.
+
+ Calling :func:`kill` causes the calling greenthread to cooperatively yield.
+ """
+ if g.dead:
+ return
+ hub = hubs.get_hub()
+ if not g:
+ # greenlet hasn't started yet and therefore throw won't work
+ # on its own; semantically we want it to be as though the main
+ # method never got called
+ def just_raise(*a, **kw):
+ if throw_args:
+ raise throw_args[1].with_traceback(throw_args[2])
+ else:
+ raise greenlet.GreenletExit()
+ g.run = just_raise
+ if isinstance(g, GreenThread):
+ # it's a GreenThread object, so we want to call its main
+ # method to take advantage of the notification
+ try:
+ g.main(just_raise, (), {})
+ except:
+ pass
+ current = getcurrent()
+ if current is not hub.greenlet:
+ # arrange to wake the caller back up immediately
+ hub.ensure_greenlet()
+ hub.schedule_call_global(0, current.switch)
+ g.throw(*throw_args)
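+# A sketch of killing a waiting greenthread:
+#
+#   gt = spawn(sleep, 60)
+#   kill(gt)   # gt sees GreenletExit right away; gt.wait() re-raises it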
diff --git a/.venv/Lib/site-packages/eventlet/hubs/__init__.py b/.venv/Lib/site-packages/eventlet/hubs/__init__.py
new file mode 100644
index 0000000..b1a3e80
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/hubs/__init__.py
@@ -0,0 +1,188 @@
+import importlib
+import inspect
+import os
+import warnings
+
+from eventlet import patcher
+from eventlet.support import greenlets as greenlet
+
+
+__all__ = ["use_hub", "get_hub", "get_default_hub", "trampoline"]
+
+threading = patcher.original('threading')
+_threadlocal = threading.local()
+
+
+# order is important, get_default_hub returns first available from here
+builtin_hub_names = ('epolls', 'kqueue', 'poll', 'selects')
+builtin_hub_modules = tuple(importlib.import_module('eventlet.hubs.' + name) for name in builtin_hub_names)
+
+
+class HubError(Exception):
+ pass
+
+
+def get_default_hub():
+ """Select the default hub implementation based on what multiplexing
+ libraries are installed. The order that the hubs are tried is:
+
+ * epoll
+ * kqueue
+ * poll
+ * select
+
+ .. include:: ../../doc/source/common.txt
+ .. note :: |internal|
+ """
+ for mod in builtin_hub_modules:
+ if mod.is_available():
+ return mod
+
+ raise HubError('no built-in hubs are available: {}'.format(builtin_hub_modules))
+
+
+def use_hub(mod=None):
+ """Use the module *mod*, containing a class called Hub, as the
+ event hub. Usually not required; the default hub is usually fine.
+
+ `mod` can be an actual hub class, a module, a string, or None.
+
+ If `mod` is a class, use it directly.
+ If `mod` is a module, use `module.Hub` class
+ If `mod` is a string and contains either '.' or ':'
+ then `use_hub` uses 'package.subpackage.module:Class' convention,
+ otherwise imports `eventlet.hubs.mod`.
+ If `mod` is None, `use_hub` uses the default hub.
+
+ Only call use_hub during application initialization,
+ because it resets the hub's state and any existing
+ timers or listeners will never be resumed.
+
+ These two threadlocal attributes are not part of Eventlet public API:
+ - `threadlocal.Hub` (capital H) is hub constructor, used when no hub is currently active
+ - `threadlocal.hub` (lowercase h) is active hub instance
+ """
+ if mod is None:
+ mod = os.environ.get('EVENTLET_HUB', None)
+ if mod is None:
+ mod = get_default_hub()
+ if hasattr(_threadlocal, 'hub'):
+ del _threadlocal.hub
+
+ classname = ''
+ if isinstance(mod, str):
+ if mod.strip() == "":
+ raise RuntimeError("Need to specify a hub")
+ if '.' in mod or ':' in mod:
+ modulename, _, classname = mod.strip().partition(':')
+ else:
+ modulename = 'eventlet.hubs.' + mod
+ mod = importlib.import_module(modulename)
+
+ if hasattr(mod, 'is_available'):
+ if not mod.is_available():
+ raise Exception('selected hub is not available on this system mod={}'.format(mod))
+ else:
+ msg = '''Please provide `is_available()` function in your custom Eventlet hub {mod}.
+It must return bool: whether hub supports current platform. See eventlet/hubs/{{epoll,kqueue}} for example.
+'''.format(mod=mod)
+ warnings.warn(msg, DeprecationWarning, stacklevel=3)
+
+ hubclass = mod
+ if not inspect.isclass(mod):
+ hubclass = getattr(mod, classname or 'Hub')
+
+ _threadlocal.Hub = hubclass
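+# A sketch of the accepted spellings of `mod` described above:
+#
+#   use_hub()                      # pick the default hub
+#   use_hub('selects')             # a built-in hub, by short name
+#   use_hub('mypkg.myhub:MyHub')   # dotted path; the name is illustrative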
+
+
+def get_hub():
+ """Get the current event hub singleton object.
+
+ .. note :: |internal|
+ """
+ try:
+ hub = _threadlocal.hub
+ except AttributeError:
+ try:
+ _threadlocal.Hub
+ except AttributeError:
+ use_hub()
+ hub = _threadlocal.hub = _threadlocal.Hub()
+ return hub
+
+
+# Lame mid-file import because of complex dependencies in the import graph
+from eventlet import timeout
+
+
+def trampoline(fd, read=None, write=None, timeout=None,
+ timeout_exc=timeout.Timeout,
+ mark_as_closed=None):
+ """Suspend the current coroutine until the given socket object or file
+ descriptor is ready to *read*, ready to *write*, or the specified
+ *timeout* elapses, depending on arguments specified.
+
+ To wait for *fd* to be ready to read, pass *read* ``=True``; ready to
+ write, pass *write* ``=True``. To specify a timeout, pass the *timeout*
+ argument in seconds.
+
+ If the specified *timeout* elapses before the socket is ready to read or
+ write, *timeout_exc* will be raised instead of ``trampoline()``
+ returning normally.
+
+ .. note :: |internal|
+ """
+ t = None
+ hub = get_hub()
+ current = greenlet.getcurrent()
+ if hub.greenlet is current:
+ raise RuntimeError('do not call blocking functions from the mainloop')
+ if (read and write):
+ raise RuntimeError('not allowed to trampoline for reading and writing')
+ try:
+ fileno = fd.fileno()
+ except AttributeError:
+ fileno = fd
+ if timeout is not None:
+ def _timeout(exc):
+ # This is only useful to insert debugging
+ current.throw(exc)
+ t = hub.schedule_call_global(timeout, _timeout, timeout_exc)
+ try:
+ if read:
+ listener = hub.add(hub.READ, fileno, current.switch, current.throw, mark_as_closed)
+ elif write:
+ listener = hub.add(hub.WRITE, fileno, current.switch, current.throw, mark_as_closed)
+ try:
+ return hub.switch()
+ finally:
+ hub.remove(listener)
+ finally:
+ if t is not None:
+ t.cancel()
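+# A sketch of waiting for readability with a 3 second cap; sock is an
+# assumed non-blocking socket object:
+#
+#   trampoline(sock, read=True, timeout=3.0)   # raises Timeout on expiry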
+
+
+def notify_close(fd):
+ """
+ A particular file descriptor has been explicitly closed. Register for any
+ waiting listeners to be notified on the next run loop.
+ """
+ hub = get_hub()
+ hub.notify_close(fd)
+
+
+def notify_opened(fd):
+ """
+ Some file descriptors may be closed 'silently' - that is, by the garbage
+ collector, by an external library, etc. When the OS returns a file descriptor
+ from an open call (or something similar), this may be the only indication we
+ have that the FD has been closed and then recycled.
+ We let the hub know that the old file descriptor is dead; any stuck listeners
+ will be disabled and notified in turn.
+ """
+ hub = get_hub()
+ hub.mark_as_reopened(fd)
+
+
+class IOClosed(IOError):
+ pass
diff --git a/.venv/Lib/site-packages/eventlet/hubs/__pycache__/__init__.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/hubs/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..c1d59a6
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/hubs/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/hubs/__pycache__/asyncio.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/hubs/__pycache__/asyncio.cpython-312.pyc
new file mode 100644
index 0000000..259a237
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/hubs/__pycache__/asyncio.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/hubs/__pycache__/epolls.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/hubs/__pycache__/epolls.cpython-312.pyc
new file mode 100644
index 0000000..da7c210
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/hubs/__pycache__/epolls.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/hubs/__pycache__/hub.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/hubs/__pycache__/hub.cpython-312.pyc
new file mode 100644
index 0000000..1e7fa23
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/hubs/__pycache__/hub.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/hubs/__pycache__/kqueue.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/hubs/__pycache__/kqueue.cpython-312.pyc
new file mode 100644
index 0000000..a5c1908
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/hubs/__pycache__/kqueue.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/hubs/__pycache__/poll.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/hubs/__pycache__/poll.cpython-312.pyc
new file mode 100644
index 0000000..fc45110
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/hubs/__pycache__/poll.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/hubs/__pycache__/pyevent.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/hubs/__pycache__/pyevent.cpython-312.pyc
new file mode 100644
index 0000000..c1fdd65
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/hubs/__pycache__/pyevent.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/hubs/__pycache__/selects.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/hubs/__pycache__/selects.cpython-312.pyc
new file mode 100644
index 0000000..f3e4ef0
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/hubs/__pycache__/selects.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/hubs/__pycache__/timer.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/hubs/__pycache__/timer.cpython-312.pyc
new file mode 100644
index 0000000..011fc09
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/hubs/__pycache__/timer.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/hubs/asyncio.py b/.venv/Lib/site-packages/eventlet/hubs/asyncio.py
new file mode 100644
index 0000000..d92ebfc
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/hubs/asyncio.py
@@ -0,0 +1,168 @@
+"""
+Asyncio-based hub, originally implemented by Miguel Grinberg.
+"""
+
+import asyncio
+try:
+ import concurrent.futures.thread
+ concurrent_imported = True
+except RuntimeError:
+ # This happens in weird edge cases where asyncio hub is started at
+ # shutdown. Not much we can do if this happens.
+ concurrent_imported = False
+import os
+import sys
+
+from eventlet.hubs import hub
+from eventlet.patcher import original
+
+
+def is_available():
+ """
+ Indicate whether this hub is available, since some hubs are
+ platform-specific.
+
+ Python always has asyncio, so this is always ``True``.
+ """
+ return True
+
+
+class Hub(hub.BaseHub):
+ """An Eventlet hub implementation on top of an asyncio event loop."""
+
+ def __init__(self):
+ super().__init__()
+ # Make sure asyncio thread pools use real threads:
+ if concurrent_imported:
+ concurrent.futures.thread.threading = original("threading")
+ concurrent.futures.thread.queue = original("queue")
+
+ # Make sure select/poll/epoll/kqueue are usable by asyncio:
+ import selectors
+ selectors.select = original("select")
+
+ # Make sure DNS lookups use normal blocking API (which asyncio will run
+ # in a thread):
+ import asyncio.base_events
+ asyncio.base_events.socket = original("socket")
+
+ # The presumption is that eventlet is driving the event loop, so we
+ # want a new one we control.
+ self.loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(self.loop)
+ self.sleep_event = asyncio.Event()
+
+ def add_timer(self, timer):
+ """
+ Register a ``Timer``.
+
+ Typically not called directly by users.
+ """
+ super().add_timer(timer)
+ self.sleep_event.set()
+
+ def _file_cb(self, cb, fileno):
+ """
+ Callback called by ``asyncio`` when a file descriptor has an event.
+ """
+ try:
+ cb(fileno)
+ except self.SYSTEM_EXCEPTIONS:
+ raise
+ except:
+ self.squelch_exception(fileno, sys.exc_info())
+ self.sleep_event.set()
+
+ def add(self, evtype, fileno, cb, tb, mark_as_closed):
+ """
+ Add a file descriptor of given event type to the ``Hub``. See the
+ superclass for details.
+
+ Typically not called directly by users.
+ """
+ try:
+ os.fstat(fileno)
+ except OSError:
+ raise ValueError('Invalid file descriptor')
+ already_listening = self.listeners[evtype].get(fileno) is not None
+ listener = super().add(evtype, fileno, cb, tb, mark_as_closed)
+ if not already_listening:
+ if evtype == hub.READ:
+ self.loop.add_reader(fileno, self._file_cb, cb, fileno)
+ else:
+ self.loop.add_writer(fileno, self._file_cb, cb, fileno)
+ return listener
+
+ def remove(self, listener):
+ """
+ Remove a listener from the ``Hub``. See the superclass for details.
+
+ Typically not called directly by users.
+ """
+ super().remove(listener)
+ evtype = listener.evtype
+ fileno = listener.fileno
+ if not self.listeners[evtype].get(fileno):
+ if evtype == hub.READ:
+ self.loop.remove_reader(fileno)
+ else:
+ self.loop.remove_writer(fileno)
+
+ def remove_descriptor(self, fileno):
+ """
+ Remove a file descriptor from the ``asyncio`` loop.
+
+ Typically not called directly by users.
+ """
+ have_read = self.listeners[hub.READ].get(fileno)
+ have_write = self.listeners[hub.WRITE].get(fileno)
+ super().remove_descriptor(fileno)
+ if have_read:
+ self.loop.remove_reader(fileno)
+ if have_write:
+ self.loop.remove_writer(fileno)
+
+ def run(self, *a, **kw):
+ """
+ Start the ``Hub`` running. See the superclass for details.
+ """
+ async def async_run():
+ if self.running:
+ raise RuntimeError("Already running!")
+ try:
+ self.running = True
+ self.stopping = False
+ while not self.stopping:
+ while self.closed:
+ # We ditch all of these first.
+ self.close_one()
+ self.prepare_timers()
+ if self.debug_blocking:
+ self.block_detect_pre()
+ self.fire_timers(self.clock())
+ if self.debug_blocking:
+ self.block_detect_post()
+ self.prepare_timers()
+ wakeup_when = self.sleep_until()
+ if wakeup_when is None:
+ sleep_time = self.default_sleep()
+ else:
+ sleep_time = wakeup_when - self.clock()
+ if sleep_time > 0:
+ try:
+ await asyncio.wait_for(self.sleep_event.wait(),
+ sleep_time)
+ except asyncio.TimeoutError:
+ pass
+ self.sleep_event.clear()
+ else:
+ await asyncio.sleep(0)
+ else:
+ self.timers_canceled = 0
+ del self.timers[:]
+ del self.next_timers[:]
+ finally:
+ self.running = False
+ self.stopping = False
+
+ self.loop.run_until_complete(async_run())
diff --git a/.venv/Lib/site-packages/eventlet/hubs/epolls.py b/.venv/Lib/site-packages/eventlet/hubs/epolls.py
new file mode 100644
index 0000000..770c18d
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/hubs/epolls.py
@@ -0,0 +1,31 @@
+import errno
+from eventlet import patcher, support
+from eventlet.hubs import hub, poll
+select = patcher.original('select')
+
+
+def is_available():
+ return hasattr(select, 'epoll')
+
+
+# NOTE: we rely on the fact that the epoll flag constants
+# are identical in value to the poll constants
+class Hub(poll.Hub):
+ def __init__(self, clock=None):
+ super().__init__(clock=clock)
+ self.poll = select.epoll()
+
+ def add(self, evtype, fileno, cb, tb, mac):
+ oldlisteners = bool(self.listeners[self.READ].get(fileno) or
+ self.listeners[self.WRITE].get(fileno))
+ # not super() to avoid double register()
+ listener = hub.BaseHub.add(self, evtype, fileno, cb, tb, mac)
+ try:
+ self.register(fileno, new=not oldlisteners)
+ except OSError as ex: # ignore EEXIST, #80
+ if support.get_errno(ex) != errno.EEXIST:
+ raise
+ return listener
+
+ def do_poll(self, seconds):
+ return self.poll.poll(seconds)
diff --git a/.venv/Lib/site-packages/eventlet/hubs/hub.py b/.venv/Lib/site-packages/eventlet/hubs/hub.py
new file mode 100644
index 0000000..abeee6c
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/hubs/hub.py
@@ -0,0 +1,495 @@
+import errno
+import heapq
+import math
+import signal
+import sys
+import traceback
+
+arm_alarm = None
+if hasattr(signal, 'setitimer'):
+ def alarm_itimer(seconds):
+ signal.setitimer(signal.ITIMER_REAL, seconds)
+ arm_alarm = alarm_itimer
+else:
+ try:
+ import itimer
+ arm_alarm = itimer.alarm
+ except ImportError:
+ def alarm_signal(seconds):
+ signal.alarm(math.ceil(seconds))
+ arm_alarm = alarm_signal
+
+import eventlet.hubs
+from eventlet.hubs import timer
+from eventlet.support import greenlets as greenlet
+try:
+ from monotonic import monotonic
+except ImportError:
+ from time import monotonic
+
+g_prevent_multiple_readers = True
+
+READ = "read"
+WRITE = "write"
+
+
+def closed_callback(fileno):
+ """ Used to de-fang a callback that may be triggered by a loop in BaseHub.wait
+ """
+ # No-op.
+ pass
+
+
+class FdListener:
+
+ def __init__(self, evtype, fileno, cb, tb, mark_as_closed):
+ """ The following are required:
+ cb - the standard callback, which will switch into the
+ listening greenlet to indicate that the event waited upon
+ is ready
+ tb - a 'throwback'. This is typically greenlet.throw, used
+ to raise a signal into the target greenlet indicating that
+ an event was obsoleted by its underlying filehandle being
+ repurposed.
+ mark_as_closed - if any listener is obsoleted, this is called
+ (in the context of some other client greenlet) to alert
+ underlying filehandle-wrapping objects that they've been
+ closed.
+ """
+ assert (evtype is READ or evtype is WRITE)
+ self.evtype = evtype
+ self.fileno = fileno
+ self.cb = cb
+ self.tb = tb
+ self.mark_as_closed = mark_as_closed
+ self.spent = False
+ self.greenlet = greenlet.getcurrent()
+
+ def __repr__(self):
+ return "%s(%r, %r, %r, %r)" % (type(self).__name__, self.evtype, self.fileno,
+ self.cb, self.tb)
+ __str__ = __repr__
+
+ def defang(self):
+ self.cb = closed_callback
+ if self.mark_as_closed is not None:
+ self.mark_as_closed()
+ self.spent = True
+
+
+noop = FdListener(READ, 0, lambda x: None, lambda x: None, None)
+
+
+# in debug mode, track the call site that created the listener
+
+
+class DebugListener(FdListener):
+
+ def __init__(self, evtype, fileno, cb, tb, mark_as_closed):
+ self.where_called = traceback.format_stack()
+ self.greenlet = greenlet.getcurrent()
+ super().__init__(evtype, fileno, cb, tb, mark_as_closed)
+
+ def __repr__(self):
+ return "DebugListener(%r, %r, %r, %r, %r, %r)\n%sEndDebugFdListener" % (
+ self.evtype,
+ self.fileno,
+ self.cb,
+ self.tb,
+ self.mark_as_closed,
+ self.greenlet,
+ ''.join(self.where_called))
+ __str__ = __repr__
+
+
+def alarm_handler(signum, frame):
+ import inspect
+ raise RuntimeError("Blocking detector ALARMED at" + str(inspect.getframeinfo(frame)))
+
+
+class BaseHub:
+ """ Base hub class for easing the implementation of subclasses that are
+ specific to a particular underlying event architecture. """
+
+ SYSTEM_EXCEPTIONS = (KeyboardInterrupt, SystemExit)
+
+ READ = READ
+ WRITE = WRITE
+
+ def __init__(self, clock=None):
+ self.listeners = {READ: {}, WRITE: {}}
+ self.secondaries = {READ: {}, WRITE: {}}
+ self.closed = []
+
+ if clock is None:
+ clock = monotonic
+ self.clock = clock
+
+ self.greenlet = greenlet.greenlet(self.run)
+ self.stopping = False
+ self.running = False
+ self.timers = []
+ self.next_timers = []
+ self.lclass = FdListener
+ self.timers_canceled = 0
+ self.debug_exceptions = True
+ self.debug_blocking = False
+ self.debug_blocking_resolution = 1
+
+ def block_detect_pre(self):
+ # shortest alarm we can possibly raise is one second
+ tmp = signal.signal(signal.SIGALRM, alarm_handler)
+ if tmp != alarm_handler:
+ self._old_signal_handler = tmp
+
+ arm_alarm(self.debug_blocking_resolution)
+
+ def block_detect_post(self):
+ if (hasattr(self, "_old_signal_handler") and
+ self._old_signal_handler):
+ signal.signal(signal.SIGALRM, self._old_signal_handler)
+ signal.alarm(0)
+
+ def add(self, evtype, fileno, cb, tb, mark_as_closed):
+ """ Signals an intent to or write a particular file descriptor.
+
+ The *evtype* argument is either the constant READ or WRITE.
+
+ The *fileno* argument is the file number of the file of interest.
+
+ The *cb* argument is the callback which will be called when the file
+ is ready for reading/writing.
+
+ The *tb* argument is the throwback used to signal (into the greenlet)
+ that the file was closed.
+
+ The *mark_as_closed* is used in the context of the event hub to
+ prepare a Python object as being closed, pre-empting further
+ close operations from accidentally shutting down the wrong OS thread.
+ """
+ listener = self.lclass(evtype, fileno, cb, tb, mark_as_closed)
+ bucket = self.listeners[evtype]
+ if fileno in bucket:
+ if g_prevent_multiple_readers:
+ raise RuntimeError(
+ "Second simultaneous %s on fileno %s "
+ "detected. Unless you really know what you're doing, "
+ "make sure that only one greenthread can %s any "
+ "particular socket. Consider using a pools.Pool. "
+ "If you do know what you're doing and want to disable "
+ "this error, call "
+ "eventlet.debug.hub_prevent_multiple_readers(False) - MY THREAD=%s; "
+ "THAT THREAD=%s" % (
+ evtype, fileno, evtype, cb, bucket[fileno]))
+ # store off the second listener in another structure
+ self.secondaries[evtype].setdefault(fileno, []).append(listener)
+ else:
+ bucket[fileno] = listener
+ return listener
+
+ def _obsolete(self, fileno):
+ """ We've received an indication that 'fileno' has been obsoleted.
+ Any current listeners must be defanged, and notifications to
+ their greenlets queued up to send.
+ """
+ found = False
+ for evtype, bucket in self.secondaries.items():
+ if fileno in bucket:
+ for listener in bucket[fileno]:
+ found = True
+ self.closed.append(listener)
+ listener.defang()
+ del bucket[fileno]
+
+ # For the primary listeners, we actually need to call remove,
+ # which may modify the underlying OS polling objects.
+ for evtype, bucket in self.listeners.items():
+ if fileno in bucket:
+ listener = bucket[fileno]
+ found = True
+ self.closed.append(listener)
+ self.remove(listener)
+ listener.defang()
+
+ return found
+
+ def notify_close(self, fileno):
+ """ We might want to do something when a fileno is closed.
+ However, currently it suffices to obsolete listeners only
+ when we detect an old fileno being recycled, on open.
+ """
+ pass
+
+ def remove(self, listener):
+ if listener.spent:
+ # trampoline may trigger this in its finally section.
+ return
+
+ fileno = listener.fileno
+ evtype = listener.evtype
+ if listener is self.listeners[evtype][fileno]:
+ del self.listeners[evtype][fileno]
+ # migrate a secondary listener to be the primary listener
+ if fileno in self.secondaries[evtype]:
+ sec = self.secondaries[evtype][fileno]
+ if sec:
+ self.listeners[evtype][fileno] = sec.pop(0)
+ if not sec:
+ del self.secondaries[evtype][fileno]
+ else:
+ self.secondaries[evtype][fileno].remove(listener)
+ if not self.secondaries[evtype][fileno]:
+ del self.secondaries[evtype][fileno]
+
+ def mark_as_reopened(self, fileno):
+ """ If a file descriptor is returned by the OS as the result of some
+ open call (or equivalent), that signals that it might be being
+ recycled.
+
+ Catch the case where the fd was previously in use.
+ """
+ self._obsolete(fileno)
+
+ def remove_descriptor(self, fileno):
+ """ Completely remove all listeners for this fileno. For internal use
+ only."""
+ # gather any listeners we have
+ listeners = []
+ listeners.append(self.listeners[READ].get(fileno, noop))
+ listeners.append(self.listeners[WRITE].get(fileno, noop))
+ listeners.extend(self.secondaries[READ].get(fileno, ()))
+ listeners.extend(self.secondaries[WRITE].get(fileno, ()))
+ for listener in listeners:
+ try:
+ # listener.cb may want to remove(listener)
+ listener.cb(fileno)
+ except Exception:
+ self.squelch_generic_exception(sys.exc_info())
+ # now this fileno is dead to all
+ self.listeners[READ].pop(fileno, None)
+ self.listeners[WRITE].pop(fileno, None)
+ self.secondaries[READ].pop(fileno, None)
+ self.secondaries[WRITE].pop(fileno, None)
+
+ def close_one(self):
+ """ Triggered from the main run loop. If a listener's underlying FD was
+ closed somehow, throw an exception back to the trampoline, which should
+ be able to manage it appropriately.
+ """
+ listener = self.closed.pop()
+ if not listener.greenlet.dead:
+ # There's no point signalling a greenlet that's already dead.
+ listener.tb(eventlet.hubs.IOClosed(errno.ENOTCONN, "Operation on closed file"))
+
+ def ensure_greenlet(self):
+ if self.greenlet.dead:
+ # create new greenlet sharing same parent as original
+ new = greenlet.greenlet(self.run, self.greenlet.parent)
+ # need to assign as parent of old greenlet
+ # for those greenlets that are currently
+ # children of the dead hub and may subsequently
+ # exit without further switching to hub.
+ self.greenlet.parent = new
+ self.greenlet = new
+
+ def switch(self):
+ cur = greenlet.getcurrent()
+ assert cur is not self.greenlet, 'Cannot switch to MAINLOOP from MAINLOOP'
+ switch_out = getattr(cur, 'switch_out', None)
+ if switch_out is not None:
+ try:
+ switch_out()
+ except:
+ self.squelch_generic_exception(sys.exc_info())
+ self.ensure_greenlet()
+ try:
+ if self.greenlet.parent is not cur:
+ cur.parent = self.greenlet
+ except ValueError:
+ pass # gets raised if there is a greenlet parent cycle
+ return self.greenlet.switch()
+
+ def squelch_exception(self, fileno, exc_info):
+ traceback.print_exception(*exc_info)
+ sys.stderr.write("Removing descriptor: %r\n" % (fileno,))
+ sys.stderr.flush()
+ try:
+ self.remove_descriptor(fileno)
+ except Exception as e:
+ sys.stderr.write("Exception while removing descriptor! %r\n" % (e,))
+ sys.stderr.flush()
+
+ def wait(self, seconds=None):
+ raise NotImplementedError("Implement this in a subclass")
+
+ def default_sleep(self):
+ return 60.0
+
+ def sleep_until(self):
+ t = self.timers
+ if not t:
+ return None
+ return t[0][0]
+
+ def run(self, *a, **kw):
+ """Run the runloop until abort is called.
+ """
+ # accept and discard variable arguments because they will be
+ # supplied if other greenlets have run and exited before the
+ # hub's greenlet gets a chance to run
+ if self.running:
+ raise RuntimeError("Already running!")
+ try:
+ self.running = True
+ self.stopping = False
+ while not self.stopping:
+ while self.closed:
+ # We ditch all of these first.
+ self.close_one()
+ self.prepare_timers()
+ if self.debug_blocking:
+ self.block_detect_pre()
+ self.fire_timers(self.clock())
+ if self.debug_blocking:
+ self.block_detect_post()
+ self.prepare_timers()
+ wakeup_when = self.sleep_until()
+ if wakeup_when is None:
+ sleep_time = self.default_sleep()
+ else:
+ sleep_time = wakeup_when - self.clock()
+ if sleep_time > 0:
+ self.wait(sleep_time)
+ else:
+ self.wait(0)
+ else:
+ self.timers_canceled = 0
+ del self.timers[:]
+ del self.next_timers[:]
+ finally:
+ self.running = False
+ self.stopping = False
+
+ def abort(self, wait=False):
+ """Stop the runloop. If run is executing, it will exit after
+ completing the next runloop iteration.
+
+ Set *wait* to True to cause abort to switch to the hub immediately and
+ wait until it's finished processing. Waiting for the hub will only
+ work from the main greenthread; all other greenthreads will become
+ unreachable.
+ """
+ if self.running:
+ self.stopping = True
+ if wait:
+ assert self.greenlet is not greenlet.getcurrent(
+ ), "Can't abort with wait from inside the hub's greenlet."
+ # schedule an immediate timer just so the hub doesn't sleep
+ self.schedule_call_global(0, lambda: None)
+ # switch to it; when done the hub will switch back to its parent,
+ # the main greenlet
+ self.switch()
+
+ def squelch_generic_exception(self, exc_info):
+ if self.debug_exceptions:
+ traceback.print_exception(*exc_info)
+ sys.stderr.flush()
+
+ def squelch_timer_exception(self, timer, exc_info):
+ if self.debug_exceptions:
+ traceback.print_exception(*exc_info)
+ sys.stderr.flush()
+
+ def add_timer(self, timer):
+ scheduled_time = self.clock() + timer.seconds
+ self.next_timers.append((scheduled_time, timer))
+ return scheduled_time
+
+ def timer_canceled(self, timer):
+ self.timers_canceled += 1
+ len_timers = len(self.timers) + len(self.next_timers)
+ if len_timers > 1000 and len_timers / 2 <= self.timers_canceled:
+ self.timers_canceled = 0
+ self.timers = [t for t in self.timers if not t[1].called]
+ self.next_timers = [t for t in self.next_timers if not t[1].called]
+ heapq.heapify(self.timers)
+
+ def prepare_timers(self):
+ heappush = heapq.heappush
+ t = self.timers
+ for item in self.next_timers:
+ if item[1].called:
+ self.timers_canceled -= 1
+ else:
+ heappush(t, item)
+ del self.next_timers[:]
+
+ def schedule_call_local(self, seconds, cb, *args, **kw):
+ """Schedule a callable to be called after 'seconds' seconds have
+ elapsed. Cancel the timer if greenlet has exited.
+ seconds: The number of seconds to wait.
+ cb: The callable to call after the given time.
+ *args: Arguments to pass to the callable when called.
+ **kw: Keyword arguments to pass to the callable when called.
+ """
+ t = timer.LocalTimer(seconds, cb, *args, **kw)
+ self.add_timer(t)
+ return t
+
+ def schedule_call_global(self, seconds, cb, *args, **kw):
+ """Schedule a callable to be called after 'seconds' seconds have
+ elapsed. The timer will NOT be canceled if the current greenlet has
+ exited before the timer fires.
+ seconds: The number of seconds to wait.
+ cb: The callable to call after the given time.
+ *args: Arguments to pass to the callable when called.
+ **kw: Keyword arguments to pass to the callable when called.
+ """
+ t = timer.Timer(seconds, cb, *args, **kw)
+ self.add_timer(t)
+ return t
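+ # A sketch of scheduling through the hub directly; normally one would
+ # reach for eventlet.spawn_after instead:
+ #
+ #   hub = eventlet.hubs.get_hub()
+ #   t = hub.schedule_call_global(1.0, print, 'tick')
+ #   t.cancel()   # a canceled timer never fires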
+
+ def fire_timers(self, when):
+ t = self.timers
+ heappop = heapq.heappop
+
+ while t:
+ next = t[0]
+
+ exp = next[0]
+ timer = next[1]
+
+ if when < exp:
+ break
+
+ heappop(t)
+
+ try:
+ if timer.called:
+ self.timers_canceled -= 1
+ else:
+ timer()
+ except self.SYSTEM_EXCEPTIONS:
+ raise
+ except:
+ self.squelch_timer_exception(timer, sys.exc_info())
+
+ # for debugging:
+
+ def get_readers(self):
+ return self.listeners[READ].values()
+
+ def get_writers(self):
+ return self.listeners[WRITE].values()
+
+ def get_timers_count(self):
+ return len(self.timers) + len(self.next_timers)
+
+ def set_debug_listeners(self, value):
+ if value:
+ self.lclass = DebugListener
+ else:
+ self.lclass = FdListener
+
+ def set_timer_exceptions(self, value):
+ self.debug_exceptions = value
diff --git a/.venv/Lib/site-packages/eventlet/hubs/kqueue.py b/.venv/Lib/site-packages/eventlet/hubs/kqueue.py
new file mode 100644
index 0000000..9502576
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/hubs/kqueue.py
@@ -0,0 +1,110 @@
+import os
+import sys
+from eventlet import patcher, support
+from eventlet.hubs import hub
+select = patcher.original('select')
+time = patcher.original('time')
+
+
+def is_available():
+ return hasattr(select, 'kqueue')
+
+
+class Hub(hub.BaseHub):
+ MAX_EVENTS = 100
+
+ def __init__(self, clock=None):
+ self.FILTERS = {
+ hub.READ: select.KQ_FILTER_READ,
+ hub.WRITE: select.KQ_FILTER_WRITE,
+ }
+ super().__init__(clock)
+ self._events = {}
+ self._init_kqueue()
+
+ def _init_kqueue(self):
+ self.kqueue = select.kqueue()
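+ # Remember the creating process: kqueue descriptors are not inherited
+ # across fork(), so _control() rebuilds the kqueue when it notices
+ # the pid has changed.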
+ self._pid = os.getpid()
+
+ def _reinit_kqueue(self):
+ self.kqueue.close()
+ self._init_kqueue()
+ events = [e for i in self._events.values()
+ for e in i.values()]
+ self.kqueue.control(events, 0, 0)
+
+ def _control(self, events, max_events, timeout):
+ try:
+ return self.kqueue.control(events, max_events, timeout)
+ except OSError:
+ # have we forked?
+ if os.getpid() != self._pid:
+ self._reinit_kqueue()
+ return self.kqueue.control(events, max_events, timeout)
+ raise
+
+ def add(self, evtype, fileno, cb, tb, mac):
+ listener = super().add(evtype, fileno, cb, tb, mac)
+ events = self._events.setdefault(fileno, {})
+ if evtype not in events:
+ try:
+ event = select.kevent(fileno, self.FILTERS.get(evtype), select.KQ_EV_ADD)
+ self._control([event], 0, 0)
+ events[evtype] = event
+ except ValueError:
+ super().remove(listener)
+ raise
+ return listener
+
+ def _delete_events(self, events):
+ del_events = [
+ select.kevent(e.ident, e.filter, select.KQ_EV_DELETE)
+ for e in events
+ ]
+ self._control(del_events, 0, 0)
+
+ def remove(self, listener):
+ super().remove(listener)
+ evtype = listener.evtype
+ fileno = listener.fileno
+ if not self.listeners[evtype].get(fileno):
+ event = self._events[fileno].pop(evtype, None)
+ if event is None:
+ return
+ try:
+ self._delete_events((event,))
+ except OSError:
+ pass
+
+ def remove_descriptor(self, fileno):
+ super().remove_descriptor(fileno)
+ try:
+ events = self._events.pop(fileno).values()
+ self._delete_events(events)
+ except KeyError:
+ pass
+ except OSError:
+ pass
+
+ def wait(self, seconds=None):
+ readers = self.listeners[self.READ]
+ writers = self.listeners[self.WRITE]
+
+ if not readers and not writers:
+ if seconds:
+ time.sleep(seconds)
+ return
+ result = self._control([], self.MAX_EVENTS, seconds)
+ SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS
+ for event in result:
+ fileno = event.ident
+ evfilt = event.filter
+ try:
+ if evfilt == select.KQ_FILTER_READ:
+ readers.get(fileno, hub.noop).cb(fileno)
+ if evfilt == select.KQ_FILTER_WRITE:
+ writers.get(fileno, hub.noop).cb(fileno)
+ except SYSTEM_EXCEPTIONS:
+ raise
+ except:
+ self.squelch_exception(fileno, sys.exc_info())
diff --git a/.venv/Lib/site-packages/eventlet/hubs/poll.py b/.venv/Lib/site-packages/eventlet/hubs/poll.py
new file mode 100644
index 0000000..0984214
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/hubs/poll.py
@@ -0,0 +1,118 @@
+import errno
+import sys
+
+from eventlet import patcher, support
+from eventlet.hubs import hub
+select = patcher.original('select')
+time = patcher.original('time')
+
+
+def is_available():
+ return hasattr(select, 'poll')
+
+
+class Hub(hub.BaseHub):
+ def __init__(self, clock=None):
+ super().__init__(clock)
+ self.EXC_MASK = select.POLLERR | select.POLLHUP
+ self.READ_MASK = select.POLLIN | select.POLLPRI
+ self.WRITE_MASK = select.POLLOUT
+ self.poll = select.poll()
+
+ def add(self, evtype, fileno, cb, tb, mac):
+ listener = super().add(evtype, fileno, cb, tb, mac)
+ self.register(fileno, new=True)
+ return listener
+
+ def remove(self, listener):
+ super().remove(listener)
+ self.register(listener.fileno)
+
+ def register(self, fileno, new=False):
+ mask = 0
+ if self.listeners[self.READ].get(fileno):
+ mask |= self.READ_MASK | self.EXC_MASK
+ if self.listeners[self.WRITE].get(fileno):
+ mask |= self.WRITE_MASK | self.EXC_MASK
+ try:
+ if mask:
+ if new:
+ self.poll.register(fileno, mask)
+ else:
+ try:
+ self.poll.modify(fileno, mask)
+ except OSError:
+ self.poll.register(fileno, mask)
+ else:
+ try:
+ self.poll.unregister(fileno)
+ except (KeyError, OSError):
+ # raised if we try to remove a fileno that was
+ # already removed/invalid
+ pass
+ except ValueError:
+ # fileno is bad, issue 74
+ self.remove_descriptor(fileno)
+ raise
+
+ def remove_descriptor(self, fileno):
+ super().remove_descriptor(fileno)
+ try:
+ self.poll.unregister(fileno)
+ except (KeyError, ValueError, OSError):
+ # raised if we try to remove a fileno that was
+ # already removed/invalid
+ pass
+
+ def do_poll(self, seconds):
+ # poll.poll expects integral milliseconds
+ return self.poll.poll(int(seconds * 1000.0))
+
+ def wait(self, seconds=None):
+ readers = self.listeners[self.READ]
+ writers = self.listeners[self.WRITE]
+
+ if not readers and not writers:
+ if seconds:
+ time.sleep(seconds)
+ return
+ try:
+ presult = self.do_poll(seconds)
+ except OSError as e:
+ if support.get_errno(e) == errno.EINTR:
+ return
+ raise
+ SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS
+
+ if self.debug_blocking:
+ self.block_detect_pre()
+
+ # Accumulate the listeners to call back to prior to
+ # triggering any of them. This is to keep the set
+ # of callbacks in sync with the events we've just
+ # polled for. It prevents one handler from invalidating
+ # another.
+ callbacks = set()
+ noop = hub.noop # shave getattr
+ for fileno, event in presult:
+ if event & self.READ_MASK:
+ callbacks.add((readers.get(fileno, noop), fileno))
+ if event & self.WRITE_MASK:
+ callbacks.add((writers.get(fileno, noop), fileno))
+ if event & select.POLLNVAL:
+ self.remove_descriptor(fileno)
+ continue
+ if event & self.EXC_MASK:
+ callbacks.add((readers.get(fileno, noop), fileno))
+ callbacks.add((writers.get(fileno, noop), fileno))
+
+ for listener, fileno in callbacks:
+ try:
+ listener.cb(fileno)
+ except SYSTEM_EXCEPTIONS:
+ raise
+ except:
+ self.squelch_exception(fileno, sys.exc_info())
+
+ if self.debug_blocking:
+ self.block_detect_post()
diff --git a/.venv/Lib/site-packages/eventlet/hubs/pyevent.py b/.venv/Lib/site-packages/eventlet/hubs/pyevent.py
new file mode 100644
index 0000000..0802243
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/hubs/pyevent.py
@@ -0,0 +1,4 @@
+raise ImportError(
+ "Eventlet pyevent hub was removed because it was not maintained."
+ " Try version 0.22.1 or older. Sorry for the inconvenience."
+)
diff --git a/.venv/Lib/site-packages/eventlet/hubs/selects.py b/.venv/Lib/site-packages/eventlet/hubs/selects.py
new file mode 100644
index 0000000..b6cf129
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/hubs/selects.py
@@ -0,0 +1,63 @@
+import errno
+import sys
+from eventlet import patcher, support
+from eventlet.hubs import hub
+select = patcher.original('select')
+time = patcher.original('time')
+
+try:
+ BAD_SOCK = {errno.EBADF, errno.WSAENOTSOCK}
+except AttributeError:
+ BAD_SOCK = {errno.EBADF}
+
+
+def is_available():
+ return hasattr(select, 'select')
+
+
+class Hub(hub.BaseHub):
+ def _remove_bad_fds(self):
+ """ Iterate through fds, removing the ones that are bad per the
+ operating system.
+ """
+ all_fds = list(self.listeners[self.READ]) + list(self.listeners[self.WRITE])
+ for fd in all_fds:
+ try:
+ select.select([fd], [], [], 0)
+ except OSError as e:
+ if support.get_errno(e) in BAD_SOCK:
+ self.remove_descriptor(fd)
+
+ def wait(self, seconds=None):
+ readers = self.listeners[self.READ]
+ writers = self.listeners[self.WRITE]
+ if not readers and not writers:
+ if seconds:
+ time.sleep(seconds)
+ return
+ reader_fds = list(readers)
+ writer_fds = list(writers)
+ all_fds = reader_fds + writer_fds
+ try:
+ r, w, er = select.select(reader_fds, writer_fds, all_fds, seconds)
+ except OSError as e:
+ if support.get_errno(e) == errno.EINTR:
+ return
+ elif support.get_errno(e) in BAD_SOCK:
+ self._remove_bad_fds()
+ return
+ else:
+ raise
+
+ for fileno in er:
+ readers.get(fileno, hub.noop).cb(fileno)
+ writers.get(fileno, hub.noop).cb(fileno)
+
+ for listeners, events in ((readers, r), (writers, w)):
+ for fileno in events:
+ try:
+ listeners.get(fileno, hub.noop).cb(fileno)
+ except self.SYSTEM_EXCEPTIONS:
+ raise
+ except:
+ self.squelch_exception(fileno, sys.exc_info())
diff --git a/.venv/Lib/site-packages/eventlet/hubs/timer.py b/.venv/Lib/site-packages/eventlet/hubs/timer.py
new file mode 100644
index 0000000..2e3fd95
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/hubs/timer.py
@@ -0,0 +1,106 @@
+import traceback
+
+import eventlet.hubs
+from eventlet.support import greenlets as greenlet
+import io
+
+""" If true, captures a stack trace for each timer when constructed. This is
+useful for debugging leaking timers, to find out where the timer was set up. """
+_g_debug = False
+
+
+class Timer:
+ def __init__(self, seconds, cb, *args, **kw):
+ """Create a timer.
+ seconds: The minimum number of seconds to wait before calling cb
+ cb: The callback to call when the timer has expired
+ *args: The arguments to pass to cb
+ **kw: The keyword arguments to pass to cb
+
+ This timer will not be run unless it is scheduled in a runloop by
+ calling timer.schedule() or runloop.add_timer(timer).
+ """
+ self.seconds = seconds
+ self.tpl = cb, args, kw
+ self.called = False
+ if _g_debug:
+ self.traceback = io.StringIO()
+ traceback.print_stack(file=self.traceback)
+
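+ # Illustrative sketch ('some_callback' is hypothetical): a Timer is
+ # inert until armed on a hub.
+ #
+ #     t = Timer(5, some_callback)
+ #     t.schedule()   # arm on the current hub
+ #     t.cancel()     # or cancel before it fires
+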
+ @property
+ def pending(self):
+ return not self.called
+
+ def __repr__(self):
+ secs = getattr(self, 'seconds', None)
+ cb, args, kw = getattr(self, 'tpl', (None, None, None))
+ retval = "Timer(%s, %s, *%s, **%s)" % (
+ secs, cb, args, kw)
+ if _g_debug and hasattr(self, 'traceback'):
+ retval += '\n' + self.traceback.getvalue()
+ return retval
+
+ def copy(self):
+ cb, args, kw = self.tpl
+ return self.__class__(self.seconds, cb, *args, **kw)
+
+ def schedule(self):
+ """Schedule this timer to run in the current runloop.
+ """
+ self.called = False
+ self.scheduled_time = eventlet.hubs.get_hub().add_timer(self)
+ return self
+
+ def __call__(self, *args):
+ if not self.called:
+ self.called = True
+ cb, args, kw = self.tpl
+ try:
+ cb(*args, **kw)
+ finally:
+ try:
+ del self.tpl
+ except AttributeError:
+ pass
+
+ def cancel(self):
+ """Prevent this timer from being called. If the timer has already
+ been called or canceled, this has no effect.
+ """
+ if not self.called:
+ self.called = True
+ eventlet.hubs.get_hub().timer_canceled(self)
+ try:
+ del self.tpl
+ except AttributeError:
+ pass
+
+ # No default ordering in 3.x. heapq uses <
+ # FIXME should full set be added?
+ def __lt__(self, other):
+ return id(self) < id(other)
+
+
+class LocalTimer(Timer):
+
+ def __init__(self, *args, **kwargs):
+ self.greenlet = greenlet.getcurrent()
+ Timer.__init__(self, *args, **kwargs)
+
+ @property
+ def pending(self):
+ if self.greenlet is None or self.greenlet.dead:
+ return False
+ return not self.called
+
+ def __call__(self, *args):
+ if not self.called:
+ self.called = True
+ if self.greenlet is not None and self.greenlet.dead:
+ return
+ cb, args, kw = self.tpl
+ cb(*args, **kw)
+
+ def cancel(self):
+ self.greenlet = None
+ Timer.cancel(self)
diff --git a/.venv/Lib/site-packages/eventlet/lock.py b/.venv/Lib/site-packages/eventlet/lock.py
new file mode 100644
index 0000000..4b21e0b
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/lock.py
@@ -0,0 +1,37 @@
+from eventlet import hubs
+from eventlet.semaphore import Semaphore
+
+
+class Lock(Semaphore):
+
+ """A lock.
+ This is API-compatible with :class:`threading.Lock`.
+
+ It is a context manager, and thus can be used in a with block::
+
+ lock = Lock()
+ with lock:
+ do_some_stuff()
+ """
+
+ def release(self, blocking=True):
+ """Modify behaviour vs :class:`Semaphore` to raise a RuntimeError
+ exception if the value is greater than zero. This corrects behaviour
+ to realign with :class:`threading.Lock`.
+ """
+ if self.counter > 0:
+ raise RuntimeError("release unlocked lock")
+
+ # Consciously *do not* call super().release(), but instead inline
+ # Semaphore.release() here. We've seen issues with logging._lock
+ # deadlocking because garbage collection happened to run mid-release
+ # and eliminating the extra stack frame should help prevent that.
+ # See https://github.com/eventlet/eventlet/issues/742
+ self.counter += 1
+ if self._waiters:
+ hubs.get_hub().schedule_call_global(0, self._do_acquire)
+ return True
+
+ def _at_fork_reinit(self):
+ self.counter = 1
+ self._waiters.clear()
diff --git a/.venv/Lib/site-packages/eventlet/patcher.py b/.venv/Lib/site-packages/eventlet/patcher.py
new file mode 100644
index 0000000..d9df000
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/patcher.py
@@ -0,0 +1,595 @@
+from __future__ import annotations
+try:
+ import _imp as imp
+except ImportError:
+ import imp
+import sys
+try:
+ # Only for this purpose, it's irrelevant if `os` was already patched.
+ # https://github.com/eventlet/eventlet/pull/661
+ from os import register_at_fork
+except ImportError:
+ register_at_fork = None
+
+import eventlet
+
+
+__all__ = ['inject', 'import_patched', 'monkey_patch', 'is_monkey_patched']
+
+__exclude = {'__builtins__', '__file__', '__name__'}
+
+
+class SysModulesSaver:
+ """Class that captures some subset of the current state of
+ sys.modules. Pass in an iterator of module names to the
+ constructor."""
+
+ def __init__(self, module_names=()):
+ self._saved = {}
+ imp.acquire_lock()
+ self.save(*module_names)
+
+ def save(self, *module_names):
+ """Saves the named modules to the object."""
+ for modname in module_names:
+ self._saved[modname] = sys.modules.get(modname, None)
+
+ def restore(self):
+ """Restores the modules that the saver knows about into
+ sys.modules.
+ """
+ try:
+ for modname, mod in self._saved.items():
+ if mod is not None:
+ sys.modules[modname] = mod
+ else:
+ try:
+ del sys.modules[modname]
+ except KeyError:
+ pass
+ finally:
+ imp.release_lock()
+
+
+def inject(module_name, new_globals, *additional_modules):
+ """Base method for "injecting" greened modules into an imported module. It
+ imports the module specified in *module_name*, arranging things so
+ that the already-imported modules in *additional_modules* are used when
+ *module_name* makes its imports.
+
+ **Note:** This function does not create or change any sys.modules item, so
+ if your greened module uses code like 'sys.modules["your_module_name"]', you
+ need to update sys.modules yourself.
+
+ *new_globals* is either None or a globals dictionary that gets populated
+ with the contents of the *module_name* module. This is useful when creating
+ a "green" version of some other module.
+
+ *additional_modules* should be a collection of two-element tuples, of the
+ form (<name>, <module-object>). If it's not specified, a default selection of
+ name/module pairs is used, which should cover all use cases but may be
+ slower because there are inevitably redundant or unnecessary imports.
+ """
+ patched_name = '__patched_module_' + module_name
+ if patched_name in sys.modules:
+ # returning already-patched module so as not to destroy existing
+ # references to patched modules
+ return sys.modules[patched_name]
+
+ if not additional_modules:
+ # supply some defaults
+ additional_modules = (
+ _green_os_modules() +
+ _green_select_modules() +
+ _green_socket_modules() +
+ _green_thread_modules() +
+ _green_time_modules())
+ # _green_MySQLdb()) # enable this after a short baking-in period
+
+ # after this we are gonna screw with sys.modules, so capture the
+ # state of all the modules we're going to mess with, and lock
+ saver = SysModulesSaver([name for name, m in additional_modules])
+ saver.save(module_name)
+
+ # Cover the target modules so that when you import the module it
+ # sees only the patched versions
+ for name, mod in additional_modules:
+ sys.modules[name] = mod
+
+ # Remove the old module from sys.modules and reimport it while
+ # the specified modules are in place
+ sys.modules.pop(module_name, None)
+ # Also remove submodules and reimport them. Copy the keys to a list
+ # because the pop operations change the content of sys.modules
+ # within the loop
+ for imported_module_name in list(sys.modules.keys()):
+ if imported_module_name.startswith(module_name + '.'):
+ sys.modules.pop(imported_module_name, None)
+ try:
+ module = __import__(module_name, {}, {}, module_name.split('.')[:-1])
+
+ if new_globals is not None:
+ # Update the given globals dictionary with everything from this new module
+ for name in dir(module):
+ if name not in __exclude:
+ new_globals[name] = getattr(module, name)
+
+ # Keep a reference to the new module to prevent it from dying
+ sys.modules[patched_name] = module
+ finally:
+ saver.restore() # Put the original modules back
+
+ return module
+
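+# A hedged usage sketch ('mymod' is a hypothetical module name):
+#
+#     from eventlet import patcher
+#     from eventlet.green import socket, time
+#     mymod = patcher.inject('mymod', None, ('socket', socket), ('time', time))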
+
+def import_patched(module_name, *additional_modules, **kw_additional_modules):
+ """Imports a module in a way that ensures that the module uses "green"
+ versions of the standard library modules, so that everything works
+ nonblockingly.
+
+ The only required argument is the name of the module to be imported.
+ """
+ return inject(
+ module_name,
+ None,
+ *additional_modules + tuple(kw_additional_modules.items()))
+
+
+def patch_function(func, *additional_modules):
+ """Decorator that returns a version of the function that patches
+ some modules for the duration of the function call. This is
+ deeply gross and should only be used for functions that import
+ network libraries within their function bodies when there is no way
+ of getting around it."""
+ if not additional_modules:
+ # supply some defaults
+ additional_modules = (
+ _green_os_modules() +
+ _green_select_modules() +
+ _green_socket_modules() +
+ _green_thread_modules() +
+ _green_time_modules())
+
+ def patched(*args, **kw):
+ saver = SysModulesSaver()
+ for name, mod in additional_modules:
+ saver.save(name)
+ sys.modules[name] = mod
+ try:
+ return func(*args, **kw)
+ finally:
+ saver.restore()
+ return patched
+
+
+def _original_patch_function(func, *module_names):
+ """Kind of the contrapositive of patch_function: decorates a
+ function such that when it's called, sys.modules is populated only
+ with the unpatched versions of the specified modules. Unlike
+ patch_function, only the names of the modules need be supplied,
+ and there are no defaults. This is a gross hack; tell your kids not
+ to import inside function bodies!"""
+ def patched(*args, **kw):
+ saver = SysModulesSaver(module_names)
+ for name in module_names:
+ sys.modules[name] = original(name)
+ try:
+ return func(*args, **kw)
+ finally:
+ saver.restore()
+ return patched
+
+
+def original(modname):
+ """ This returns an unpatched version of a module; this is useful for
+ Eventlet itself (i.e. tpool)."""
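+ # Usage sketch: after monkey_patch(), original('time').sleep(1) still
+ # blocks the real OS thread, which is exactly what tpool relies on.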
+ # note that it's not necessary to temporarily install unpatched
+ # versions of all patchable modules during the import of the
+ # module; this is because none of them import each other, except
+ # for threading which imports thread
+ original_name = '__original_module_' + modname
+ if original_name in sys.modules:
+ return sys.modules.get(original_name)
+
+ # re-import the "pure" module and store it in the global _originals
+ # dict; be sure to restore whatever module had that name already
+ saver = SysModulesSaver((modname,))
+ sys.modules.pop(modname, None)
+ # some rudimentary dependency checking -- fortunately the modules
+ # we're working on don't have many dependencies so we can just do
+ # some special-casing here
+ deps = {'threading': '_thread', 'queue': 'threading'}
+ if modname in deps:
+ dependency = deps[modname]
+ saver.save(dependency)
+ sys.modules[dependency] = original(dependency)
+ try:
+ real_mod = __import__(modname, {}, {}, modname.split('.')[:-1])
+ if modname in ('Queue', 'queue') and not hasattr(real_mod, '_threading'):
+ # tricky hack: Queue's constructor in <2.7 imports
+ # threading on every instantiation; therefore we wrap
+ # it so that it always gets the original threading
+ real_mod.Queue.__init__ = _original_patch_function(
+ real_mod.Queue.__init__,
+ 'threading')
+ # save a reference to the unpatched module so it doesn't get lost
+ sys.modules[original_name] = real_mod
+ finally:
+ saver.restore()
+
+ return sys.modules[original_name]
+
+
+already_patched = {}
+
+
+def monkey_patch(**on):
+ """Globally patches certain system modules to be greenthread-friendly.
+
+ The keyword arguments afford some control over which modules are patched.
+ If no keyword arguments are supplied, all possible modules are patched.
+ If keywords are set to True, only the specified modules are patched. E.g.,
+ ``monkey_patch(socket=True, select=True)`` patches only the select and
+ socket modules. Most arguments patch the single module of the same name
+ (os, time, select). The exceptions are socket, which also patches the ssl
+ module if present; and thread, which patches _thread, threading, and queue.
+
+ It's safe to call monkey_patch multiple times.
+ """
+
+ # Workaround for import cycle observed as following in monotonic
+ # RuntimeError: no suitable implementation for this system
+ # see https://github.com/eventlet/eventlet/issues/401#issuecomment-325015989
+ #
+ # Make sure the hub is completely imported before any
+ # monkey-patching, or we risk recursion if the process of importing
+ # the hub calls into monkey-patched modules.
+ eventlet.hubs.get_hub()
+
+ accepted_args = {'os', 'select', 'socket',
+ 'thread', 'time', 'psycopg', 'MySQLdb',
+ 'builtins', 'subprocess'}
+ # To make sure only one of them is passed here
+ assert not ('__builtin__' in on and 'builtins' in on)
+ try:
+ b = on.pop('__builtin__')
+ except KeyError:
+ pass
+ else:
+ on['builtins'] = b
+
+ default_on = on.pop("all", None)
+
+ for k in on.keys():
+ if k not in accepted_args:
+ raise TypeError("monkey_patch() got an unexpected "
+ "keyword argument %r" % k)
+ if default_on is None:
+ default_on = True not in on.values()
+ for modname in accepted_args:
+ if modname == 'MySQLdb':
+ # MySQLdb is only on when explicitly patched for the moment
+ on.setdefault(modname, False)
+ if modname == 'builtins':
+ on.setdefault(modname, False)
+ on.setdefault(modname, default_on)
+
+ if on['thread'] and not already_patched.get('thread'):
+ _green_existing_locks()
+
+ modules_to_patch = []
+ for name, modules_function in [
+ ('os', _green_os_modules),
+ ('select', _green_select_modules),
+ ('socket', _green_socket_modules),
+ ('thread', _green_thread_modules),
+ ('time', _green_time_modules),
+ ('MySQLdb', _green_MySQLdb),
+ ('builtins', _green_builtins),
+ ('subprocess', _green_subprocess_modules),
+ ]:
+ if on[name] and not already_patched.get(name):
+ modules_to_patch += modules_function()
+ already_patched[name] = True
+
+ if on['psycopg'] and not already_patched.get('psycopg'):
+ try:
+ from eventlet.support import psycopg2_patcher
+ psycopg2_patcher.make_psycopg_green()
+ already_patched['psycopg'] = True
+ except ImportError:
+ # note that if we get an importerror from trying to
+ # monkeypatch psycopg, we will continually retry it
+ # whenever monkey_patch is called; this should not be a
+ # performance problem but it allows is_monkey_patched to
+ # tell us whether or not we succeeded
+ pass
+
+ _threading = original('threading')
+ imp.acquire_lock()
+ try:
+ for name, mod in modules_to_patch:
+ orig_mod = sys.modules.get(name)
+ if orig_mod is None:
+ orig_mod = __import__(name)
+ for attr_name in mod.__patched__:
+ patched_attr = getattr(mod, attr_name, None)
+ if patched_attr is not None:
+ setattr(orig_mod, attr_name, patched_attr)
+ deleted = getattr(mod, '__deleted__', [])
+ for attr_name in deleted:
+ if hasattr(orig_mod, attr_name):
+ delattr(orig_mod, attr_name)
+
+ # https://github.com/eventlet/eventlet/issues/592
+ if name == 'threading' and register_at_fork:
+ def fix_threading_active(
+ _global_dict=_threading.current_thread.__globals__,
+ # alias orig_mod as patched to reflect its new state
+ # https://github.com/eventlet/eventlet/pull/661#discussion_r509877481
+ _patched=orig_mod,
+ ):
+ _prefork_active = [None]
+
+ def before_fork():
+ _prefork_active[0] = _global_dict['_active']
+ _global_dict['_active'] = _patched._active
+
+ def after_fork():
+ _global_dict['_active'] = _prefork_active[0]
+
+ register_at_fork(
+ before=before_fork,
+ after_in_parent=after_fork)
+ fix_threading_active()
+ finally:
+ imp.release_lock()
+
+ import importlib._bootstrap
+ thread = original('_thread')
+ # importlib must use real thread locks, not eventlet.Semaphore
+ importlib._bootstrap._thread = thread
+
+ # Issue #185: Since Python 3.3, threading.RLock is implemented in C and
+ # so call a C function to get the thread identifier, instead of calling
+ # threading.get_ident(). Force the Python implementation of RLock which
+ # calls threading.get_ident() and so is compatible with eventlet.
+ import threading
+ threading.RLock = threading._PyRLock
+
+ # Issue #508: Since Python 3.7 queue.SimpleQueue is implemented in C,
+ # causing a deadlock. Replace the C implementation with the Python one.
+ import queue
+ queue.SimpleQueue = queue._PySimpleQueue
+
+
+def is_monkey_patched(module):
+ """Returns True if the given module is monkeypatched currently, False if
+ not. *module* can be either the module itself or its name.
+
+ Based entirely off the name of the module, so if you import a
+ module some other way than with the import keyword (including
+ import_patched), this might not be correct about that particular
+ module."""
+ return module in already_patched or \
+ getattr(module, '__name__', None) in already_patched
+
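+# For example, after monkey_patch(socket=True), is_monkey_patched('socket')
+# and is_monkey_patched(socket) both report True.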
+
+def _green_existing_locks():
+ """Make locks created before monkey-patching safe.
+
+ RLocks rely on a Lock and on Python 2, if an unpatched Lock blocks, it
+ blocks the native thread. We need to replace these with green Locks.
+
+ This was originally noticed in the stdlib logging module."""
+ import gc
+ import os
+ import threading
+ import eventlet.green.thread
+ rlock_type = type(threading.RLock())
+
+ # We're monkey-patching so there can't be any greenlets yet, ergo our thread
+ # ID is the only valid owner possible.
+ tid = eventlet.green.thread.get_ident()
+
+ # Now, upgrade all instances:
+ def upgrade(old_lock):
+ return _convert_py3_rlock(old_lock, tid)
+
+ _upgrade_instances(sys.modules, rlock_type, upgrade)
+
+ # Report if there are RLocks we couldn't upgrade. For cases where we're
+ # using coverage.py in the parent process, and more generally for tests,
+ # this is difficult to ensure, so just don't complain in that case.
+ if "PYTEST_CURRENT_TEST" in os.environ:
+ return
+ # On older Pythons (< 3.10), gc.get_objects() won't return any RLock
+ # instances, so this warning won't get logged on older Pythons. However,
+ # it's a useful warning, so we try to do it anyway for the benefit of those
+ # users on 3.10 or later.
+ gc.collect()
+ remaining_rlocks = len({o for o in gc.get_objects() if isinstance(o, rlock_type)})
+ if remaining_rlocks:
+ import logging
+ logger = logging.Logger("eventlet")
+ logger.error("{} RLock(s) were not greened,".format(remaining_rlocks) +
+ " to fix this error make sure you run eventlet.monkey_patch() " +
+ "before importing any other modules.")
+
+
+def _upgrade_instances(container, klass, upgrade, visited=None, old_to_new=None):
+ """
+ Starting with a Python object, find all instances of ``klass``, following
+ references in ``dict`` values, ``list`` items, and attributes.
+
+ Once an object is found, replace all instances with
+ ``upgrade(found_object)``, again limited to the criteria above.
+
+ In practice this is used only for ``threading.RLock``, so we can assume
+ instances are hashable.
+ """
+ if visited is None:
+ visited = {} # map id(obj) to obj
+ if old_to_new is None:
+ old_to_new = {} # map old klass instance to upgrade(old)
+
+ # Handle circular references:
+ visited[id(container)] = container
+
+ def upgrade_or_traverse(obj):
+ if id(obj) in visited:
+ return None
+ if isinstance(obj, klass):
+ if obj in old_to_new:
+ return old_to_new[obj]
+ else:
+ new = upgrade(obj)
+ old_to_new[obj] = new
+ return new
+ else:
+ _upgrade_instances(obj, klass, upgrade, visited, old_to_new)
+ return None
+
+ if isinstance(container, dict):
+ for k, v in list(container.items()):
+ new = upgrade_or_traverse(v)
+ if new is not None:
+ container[k] = new
+ if isinstance(container, list):
+ for i, v in enumerate(container):
+ new = upgrade_or_traverse(v)
+ if new is not None:
+ container[i] = new
+ try:
+ container_vars = vars(container)
+ except TypeError:
+ pass
+ else:
+ # If we get here, we're operating on an object that could
+ # be doing strange things. If anything bad happens, error and
+ # warn the eventlet user to monkey_patch earlier.
+ try:
+ for k, v in list(container_vars.items()):
+ new = upgrade_or_traverse(v)
+ if new is not None:
+ setattr(container, k, new)
+ except:
+ import logging
+ logger = logging.Logger("eventlet")
+ logger.exception("An exception was thrown while monkey_patching for eventlet. "
+ "to fix this error make sure you run eventlet.monkey_patch() "
+ "before importing any other modules.", exc_info=True)
+
+
+def _convert_py3_rlock(old, tid):
+ """
+ Convert a normal RLock to one implemented in Python.
+
+ This is necessary to make RLocks work with eventlet, but also introduces
+ bugs, e.g. https://bugs.python.org/issue13697. So more of a downgrade,
+ really.
+ """
+ import threading
+ from eventlet.green.thread import allocate_lock
+ new = threading._PyRLock()
+ if not hasattr(new, "_block") or not hasattr(new, "_owner"):
+ # These will only fail if Python changes its internal implementation of
+ # _PyRLock:
+ raise RuntimeError(
+ "INTERNAL BUG. Perhaps you are using a major version " +
+ "of Python that is unsupported by eventlet? Please file a bug " +
+ "at https://github.com/eventlet/eventlet/issues/new")
+ new._block = allocate_lock()
+ acquired = False
+ while old._is_owned():
+ old.release()
+ new.acquire()
+ acquired = True
+ if old._is_owned():
+ new.acquire()
+ acquired = True
+ if acquired:
+ new._owner = tid
+ return new
+
+
+def _green_os_modules():
+ from eventlet.green import os
+ return [('os', os)]
+
+
+def _green_select_modules():
+ from eventlet.green import select
+ modules = [('select', select)]
+
+ from eventlet.green import selectors
+ modules.append(('selectors', selectors))
+
+ return modules
+
+
+def _green_socket_modules():
+ from eventlet.green import socket
+ try:
+ from eventlet.green import ssl
+ return [('socket', socket), ('ssl', ssl)]
+ except ImportError:
+ return [('socket', socket)]
+
+
+def _green_subprocess_modules():
+ from eventlet.green import subprocess
+ return [('subprocess', subprocess)]
+
+
+def _green_thread_modules():
+ from eventlet.green import Queue
+ from eventlet.green import thread
+ from eventlet.green import threading
+ return [('queue', Queue), ('_thread', thread), ('threading', threading)]
+
+
+def _green_time_modules():
+ from eventlet.green import time
+ return [('time', time)]
+
+
+def _green_MySQLdb():
+ try:
+ from eventlet.green import MySQLdb
+ return [('MySQLdb', MySQLdb)]
+ except ImportError:
+ return []
+
+
+def _green_builtins():
+ try:
+ from eventlet.green import builtin
+ return [('builtins', builtin)]
+ except ImportError:
+ return []
+
+
+def slurp_properties(source, destination, ignore=[], srckeys=None):
+ """Copy properties from *source* (assumed to be a module) to
+ *destination* (assumed to be a dict).
+
+ *ignore* lists properties that should not be thusly copied.
+ *srckeys* is a list of keys to copy, if the source's __all__ is
+ untrustworthy.
+ """
+ if srckeys is None:
+ srckeys = source.__all__
+ destination.update({
+ name: getattr(source, name)
+ for name in srckeys
+ if not (name.startswith('__') or name in ignore)
+ })
+
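+# A hedged sketch of how a green module might use this (names illustrative):
+#
+#     from eventlet import patcher
+#     _time = patcher.original('time')
+#     patcher.slurp_properties(_time, globals(),
+#                              ignore=['sleep'], srckeys=dir(_time))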
+
+if __name__ == "__main__":
+ sys.argv.pop(0)
+ monkey_patch()
+ with open(sys.argv[0]) as f:
+ code = compile(f.read(), sys.argv[0], 'exec')
+ exec(code)
diff --git a/.venv/Lib/site-packages/eventlet/pools.py b/.venv/Lib/site-packages/eventlet/pools.py
new file mode 100644
index 0000000..a65f174
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/pools.py
@@ -0,0 +1,184 @@
+import collections
+from contextlib import contextmanager
+
+from eventlet import queue
+
+
+__all__ = ['Pool', 'TokenPool']
+
+
+class Pool:
+ """
+ Pool class implements resource limitation and construction.
+
+ There are two ways of using Pool: passing a `create` argument or
+ subclassing. In either case you must provide a way to create
+ the resource.
+
+ When using the `create` argument, pass a function with no arguments::
+
+ http_pool = pools.Pool(create=httplib2.Http)
+
+ If you need to pass arguments, build a nullary function with either a
+ `lambda` expression::
+
+ http_pool = pools.Pool(create=lambda: httplib2.Http(timeout=90))
+
+ or :func:`functools.partial`::
+
+ from functools import partial
+ http_pool = pools.Pool(create=partial(httplib2.Http, timeout=90))
+
+ When subclassing, define only the :meth:`create` method
+ to implement the desired resource::
+
+ class MyPool(pools.Pool):
+ def create(self):
+ return MyObject()
+
+ The :meth:`item` method acts as a context manager;
+ that's the best way to use it::
+
+ with mypool.item() as thing:
+ thing.dostuff()
+
+ The maximum size of the pool can be modified at runtime via
+ the :meth:`resize` method.
+
+ Specifying a non-zero *min_size* argument pre-populates the pool with
+ *min_size* items. *max_size* sets a hard limit to the size of the pool --
+ it cannot contain any more items than *max_size*, and if there are already
+ *max_size* items 'checked out' of the pool, the pool will cause any
+ greenthread calling :meth:`get` to cooperatively yield until an item
+ is :meth:`put` in.
+ """
+
+ def __init__(self, min_size=0, max_size=4, order_as_stack=False, create=None):
+ """*order_as_stack* governs the ordering of the items in the free pool.
+ If ``False`` (the default), the free items collection (of items that
+ were created and were put back in the pool) acts as a round-robin,
+ giving each item approximately equal utilization. If ``True``, the
+ free pool acts as a FILO stack, which preferentially re-uses items that
+ have most recently been used.
+ """
+ self.min_size = min_size
+ self.max_size = max_size
+ self.order_as_stack = order_as_stack
+ self.current_size = 0
+ self.channel = queue.LightQueue(0)
+ self.free_items = collections.deque()
+ if create is not None:
+ self.create = create
+
+ for x in range(min_size):
+ self.current_size += 1
+ self.free_items.append(self.create())
+
+ def get(self):
+ """Return an item from the pool, when one is available. This may
+ cause the calling greenthread to block.
+ """
+ if self.free_items:
+ return self.free_items.popleft()
+ self.current_size += 1
+ if self.current_size <= self.max_size:
+ try:
+ created = self.create()
+ except:
+ self.current_size -= 1
+ raise
+ return created
+ self.current_size -= 1 # did not create
+ return self.channel.get()
+
+ @contextmanager
+ def item(self):
+ """ Get an object out of the pool, for use with with statement.
+
+ >>> from eventlet import pools
+ >>> pool = pools.TokenPool(max_size=4)
+ >>> with pool.item() as obj:
+ ... print("got token")
+ ...
+ got token
+ >>> pool.free()
+ 4
+ """
+ obj = self.get()
+ try:
+ yield obj
+ finally:
+ self.put(obj)
+
+ def put(self, item):
+ """Put an item back into the pool, when done. This may
+ cause the putting greenthread to block.
+ """
+ if self.current_size > self.max_size:
+ self.current_size -= 1
+ return
+
+ if self.waiting():
+ try:
+ self.channel.put(item, block=False)
+ return
+ except queue.Full:
+ pass
+
+ if self.order_as_stack:
+ self.free_items.appendleft(item)
+ else:
+ self.free_items.append(item)
+
+ def resize(self, new_size):
+ """Resize the pool to *new_size*.
+
+ Adjusting this number does not affect existing items checked out of
+ the pool, nor any greenthreads that are waiting for an item to free
+ up. Some indeterminate number of :meth:`get`/:meth:`put`
+ cycles will be necessary before the new maximum size truly matches
+ the actual operation of the pool.
+ """
+ self.max_size = new_size
+
+ def free(self):
+ """Return the number of free items in the pool. This corresponds
+ to the number of :meth:`get` calls needed to empty the pool.
+ """
+ return len(self.free_items) + self.max_size - self.current_size
+
+ def waiting(self):
+ """Return the number of routines waiting for a pool item.
+ """
+ return max(0, self.channel.getting() - self.channel.putting())
+
+ def create(self):
+ """Generate a new pool item. In order for the pool to
+ function, either this method must be overridden in a subclass
+ or the pool must be constructed with the `create` argument.
+ It accepts no arguments and returns a single instance of
+ whatever thing the pool is supposed to contain.
+
+ In general, :meth:`create` is called whenever the pool exceeds its
+ previous high-water mark of concurrently-checked-out-items. In other
+ words, in a new pool with *min_size* of 0, the very first call
+ to :meth:`get` will result in a call to :meth:`create`. If the first
+ caller calls :meth:`put` before some other caller calls :meth:`get`,
+ then the first item will be returned, and :meth:`create` will not be
+ called a second time.
+ """
+ raise NotImplementedError("Implement in subclass")
+
+
+class Token:
+ pass
+
+
+class TokenPool(Pool):
+ """A pool which gives out tokens (opaque unique objects), which indicate
+ that the coroutine which holds the token has a right to consume some
+ limited resource.
+ """
+
+ def create(self):
+ return Token()
diff --git a/.venv/Lib/site-packages/eventlet/queue.py b/.venv/Lib/site-packages/eventlet/queue.py
new file mode 100644
index 0000000..2ee071c
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/queue.py
@@ -0,0 +1,490 @@
+# Copyright (c) 2009 Denis Bilenko, denis.bilenko at gmail com
+# Copyright (c) 2010 Eventlet Contributors (see AUTHORS)
+# and licensed under the MIT license:
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+"""Synchronized queues.
+
+The :mod:`eventlet.queue` module implements multi-producer, multi-consumer
+queues that work across greenlets, with the API similar to the classes found in
+ the standard :mod:`Queue` and :class:`multiprocessing <multiprocessing.Queue>`
+modules.
+
+A major difference is that queues in this module operate as channels when
+initialized with *maxsize* of zero. In such case, both :meth:`Queue.empty`
+and :meth:`Queue.full` return ``True`` and :meth:`Queue.put` always blocks until
+a call to :meth:`Queue.get` retrieves the item.
+
+An interesting difference, made possible because of greenthreads, is
+that :meth:`Queue.qsize`, :meth:`Queue.empty`, and :meth:`Queue.full` *can* be
+used as indicators of whether the subsequent :meth:`Queue.get`
+or :meth:`Queue.put` will not block. The new methods :meth:`Queue.getting`
+and :meth:`Queue.putting` report on the number of greenthreads blocking
+ in :meth:`put <Queue.put>` or :meth:`get <Queue.get>` respectively.
+"""
+
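+# Channel-behavior sketch (maxsize of zero; illustrative only):
+#
+#     import eventlet
+#     from eventlet.queue import Queue
+#     channel = Queue(0)
+#     eventlet.spawn(channel.put, 'hello')  # blocks until a getter arrives
+#     print(channel.get())                  # -> 'hello'
+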
+import sys
+import heapq
+import collections
+import traceback
+
+from eventlet.event import Event
+from eventlet.greenthread import getcurrent
+from eventlet.hubs import get_hub
+import queue as Stdlib_Queue
+from eventlet.timeout import Timeout
+
+
+__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'LightQueue', 'Full', 'Empty']
+
+_NONE = object()
+Full = Stdlib_Queue.Full
+Empty = Stdlib_Queue.Empty
+
+
+class Waiter:
+ """A low level synchronization class.
+
+ Wrapper around greenlet's ``switch()`` and ``throw()`` calls that makes them safe:
+
+ * switching will occur only if the waiting greenlet is executing :meth:`wait`
+ method currently. Otherwise, :meth:`switch` and :meth:`throw` are no-ops.
+ * any error raised in the greenlet is handled inside :meth:`switch` and :meth:`throw`
+
+ The :meth:`switch` and :meth:`throw` methods must only be called from the :class:`Hub` greenlet.
+ The :meth:`wait` method must be called from a greenlet other than :class:`Hub`.
+ """
+ __slots__ = ['greenlet']
+
+ def __init__(self):
+ self.greenlet = None
+
+ def __repr__(self):
+ if self.waiting:
+ waiting = ' waiting'
+ else:
+ waiting = ''
+ return '<%s at %s%s greenlet=%r>' % (
+ type(self).__name__, hex(id(self)), waiting, self.greenlet,
+ )
+
+ def __str__(self):
+ """
+ >>> print(Waiter())
+ <Waiter greenlet=None>
+ """
+ if self.waiting:
+ waiting = ' waiting'
+ else:
+ waiting = ''
+ return '<%s%s greenlet=%s>' % (type(self).__name__, waiting, self.greenlet)
+
+ def __nonzero__(self):
+ return self.greenlet is not None
+
+ __bool__ = __nonzero__
+
+ @property
+ def waiting(self):
+ return self.greenlet is not None
+
+ def switch(self, value=None):
+ """Wake up the greenlet that is calling wait() currently (if there is one).
+ Can only be called from Hub's greenlet.
+ """
+ assert getcurrent() is get_hub(
+ ).greenlet, "Can only use Waiter.switch method from the mainloop"
+ if self.greenlet is not None:
+ try:
+ self.greenlet.switch(value)
+ except Exception:
+ traceback.print_exc()
+
+ def throw(self, *throw_args):
+ """Make greenlet calling wait() wake up (if there is a wait()).
+ Can only be called from Hub's greenlet.
+ """
+ assert getcurrent() is get_hub(
+ ).greenlet, "Can only use Waiter.throw method from the mainloop"
+ if self.greenlet is not None:
+ try:
+ self.greenlet.throw(*throw_args)
+ except Exception:
+ traceback.print_exc()
+
+ # XXX should be renamed to get() ? and the whole class is called Receiver?
+ def wait(self):
+ """Wait until switch() or throw() is called.
+ """
+ assert self.greenlet is None, 'This Waiter is already used by %r' % (self.greenlet, )
+ self.greenlet = getcurrent()
+ try:
+ return get_hub().switch()
+ finally:
+ self.greenlet = None
+
+
+class LightQueue:
+ """
+ This is a variant of Queue that behaves mostly like the standard
+ :class:`Stdlib_Queue`. It differs by not supporting the
+ :meth:`task_done <Stdlib_Queue.Queue.task_done>` or
+ :meth:`join <Stdlib_Queue.Queue.join>` methods, and is a little faster for
+ not having that overhead.
+ """
+
+ def __init__(self, maxsize=None):
+ if maxsize is None or maxsize < 0: # None is not comparable in 3.x
+ self.maxsize = None
+ else:
+ self.maxsize = maxsize
+ self.getters = set()
+ self.putters = set()
+ self._event_unlock = None
+ self._init(maxsize)
+
+ # QQQ make maxsize into a property with setter that schedules unlock if necessary
+
+ def _init(self, maxsize):
+ self.queue = collections.deque()
+
+ def _get(self):
+ return self.queue.popleft()
+
+ def _put(self, item):
+ self.queue.append(item)
+
+ def __repr__(self):
+ return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._format())
+
+ def __str__(self):
+ return '<%s %s>' % (type(self).__name__, self._format())
+
+ def _format(self):
+ result = 'maxsize=%r' % (self.maxsize, )
+ if getattr(self, 'queue', None):
+ result += ' queue=%r' % self.queue
+ if self.getters:
+ result += ' getters[%s]' % len(self.getters)
+ if self.putters:
+ result += ' putters[%s]' % len(self.putters)
+ if self._event_unlock is not None:
+ result += ' unlocking'
+ return result
+
+ def qsize(self):
+ """Return the size of the queue."""
+ return len(self.queue)
+
+ def resize(self, size):
+ """Resizes the queue's maximum size.
+
+ If the size is increased, and there are putters waiting, they may be woken up."""
+ # None is not comparable in 3.x
+ if self.maxsize is not None and (size is None or size > self.maxsize):
+ # Maybe wake some stuff up
+ self._schedule_unlock()
+ self.maxsize = size
+
+ def putting(self):
+ """Returns the number of greenthreads that are blocked waiting to put
+ items into the queue."""
+ return len(self.putters)
+
+ def getting(self):
+ """Returns the number of greenthreads that are blocked waiting on an
+ empty queue."""
+ return len(self.getters)
+
+ def empty(self):
+ """Return ``True`` if the queue is empty, ``False`` otherwise."""
+ return not self.qsize()
+
+ def full(self):
+ """Return ``True`` if the queue is full, ``False`` otherwise.
+
+ ``Queue(None)`` is never full.
+ """
+ # None is not comparable in 3.x
+ return self.maxsize is not None and self.qsize() >= self.maxsize
+
+ def put(self, item, block=True, timeout=None):
+ """Put an item into the queue.
+
+ If optional arg *block* is true and *timeout* is ``None`` (the default),
+ block if necessary until a free slot is available. If *timeout* is
+ a positive number, it blocks at most *timeout* seconds and raises
+ the :class:`Full` exception if no free slot was available within that time.
+ Otherwise (*block* is false), put an item on the queue if a free slot
+ is immediately available, else raise the :class:`Full` exception (*timeout*
+ is ignored in that case).
+ """
+ if self.maxsize is None or self.qsize() < self.maxsize:
+ # there's a free slot, put an item right away
+ self._put(item)
+ if self.getters:
+ self._schedule_unlock()
+ elif not block and get_hub().greenlet is getcurrent():
+ # we're in the mainloop, so we cannot wait; we can switch() to other greenlets though
+ # find a getter and deliver an item to it
+ while self.getters:
+ getter = self.getters.pop()
+ if getter:
+ self._put(item)
+ item = self._get()
+ getter.switch(item)
+ return
+ raise Full
+ elif block:
+ waiter = ItemWaiter(item, block)
+ self.putters.add(waiter)
+ timeout = Timeout(timeout, Full)
+ try:
+ if self.getters:
+ self._schedule_unlock()
+ result = waiter.wait()
+ assert result is waiter, "Invalid switch into Queue.put: %r" % (result, )
+ if waiter.item is not _NONE:
+ self._put(item)
+ finally:
+ timeout.cancel()
+ self.putters.discard(waiter)
+ elif self.getters:
+ waiter = ItemWaiter(item, block)
+ self.putters.add(waiter)
+ self._schedule_unlock()
+ result = waiter.wait()
+ assert result is waiter, "Invalid switch into Queue.put: %r" % (result, )
+ if waiter.item is not _NONE:
+ raise Full
+ else:
+ raise Full
+
+ def put_nowait(self, item):
+ """Put an item into the queue without blocking.
+
+ Only enqueue the item if a free slot is immediately available.
+ Otherwise raise the :class:`Full` exception.
+ """
+ self.put(item, False)
+
+ def get(self, block=True, timeout=None):
+ """Remove and return an item from the queue.
+
+ If optional args *block* is true and *timeout* is ``None`` (the default),
+ block if necessary until an item is available. If *timeout* is a positive number,
+ it blocks at most *timeout* seconds and raises the :class:`Empty` exception
+ if no item was available within that time. Otherwise (*block* is false), return
+ an item if one is immediately available, else raise the :class:`Empty` exception
+ (*timeout* is ignored in that case).
+ """
+ if self.qsize():
+ if self.putters:
+ self._schedule_unlock()
+ return self._get()
+ elif not block and get_hub().greenlet is getcurrent():
+ # special case to make get_nowait() runnable in the mainloop greenlet
+ # there are no items in the queue; try to fix the situation by unlocking putters
+ while self.putters:
+ putter = self.putters.pop()
+ if putter:
+ putter.switch(putter)
+ if self.qsize():
+ return self._get()
+ raise Empty
+ elif block:
+ waiter = Waiter()
+ timeout = Timeout(timeout, Empty)
+ try:
+ self.getters.add(waiter)
+ if self.putters:
+ self._schedule_unlock()
+ try:
+ return waiter.wait()
+ except:
+ self._schedule_unlock()
+ raise
+ finally:
+ self.getters.discard(waiter)
+ timeout.cancel()
+ else:
+ raise Empty
+
+ def get_nowait(self):
+ """Remove and return an item from the queue without blocking.
+
+ Only get an item if one is immediately available. Otherwise
+ raise the :class:`Empty` exception.
+ """
+ return self.get(False)
+
+ def _unlock(self):
+ try:
+ while True:
+ if self.qsize() and self.getters:
+ getter = self.getters.pop()
+ if getter:
+ try:
+ item = self._get()
+ except:
+ getter.throw(*sys.exc_info())
+ else:
+ getter.switch(item)
+ elif self.putters and self.getters:
+ putter = self.putters.pop()
+ if putter:
+ getter = self.getters.pop()
+ if getter:
+ item = putter.item
+ # this makes the greenlet calling put() not call _put() again
+ putter.item = _NONE
+ self._put(item)
+ item = self._get()
+ getter.switch(item)
+ putter.switch(putter)
+ else:
+ self.putters.add(putter)
+ elif self.putters and (self.getters or
+ self.maxsize is None or
+ self.qsize() < self.maxsize):
+ putter = self.putters.pop()
+ putter.switch(putter)
+ elif self.putters and not self.getters:
+ full = [p for p in self.putters if not p.block]
+ if not full:
+ break
+ for putter in full:
+ self.putters.discard(putter)
+ get_hub().schedule_call_global(
+ 0, putter.greenlet.throw, Full)
+ else:
+ break
+ finally:
+ self._event_unlock = None # QQQ maybe it's possible to obtain this info from libevent?
+ # i.e. whether this event is pending _OR_ currently executing
+ # testcase: 2 greenlets: while True: q.put(q.get()) - nothing else has a chance to execute
+ # to avoid this, schedule unlock with timer(0, ...) once in a while
+
+ def _schedule_unlock(self):
+ if self._event_unlock is None:
+ self._event_unlock = get_hub().schedule_call_global(0, self._unlock)
+
+
+class ItemWaiter(Waiter):
+ __slots__ = ['item', 'block']
+
+ def __init__(self, item, block):
+ Waiter.__init__(self)
+ self.item = item
+ self.block = block
+
+
+class Queue(LightQueue):
+ '''Create a queue object with a given maximum size.
+
+ If *maxsize* is less than zero or ``None``, the queue size is infinite.
+
+ ``Queue(0)`` is a channel, that is, its :meth:`put` method always blocks
+ until the item is delivered. (This is unlike the standard
+ :class:`Stdlib_Queue`, where 0 means infinite size).
+
+ In all other respects, this Queue class resembles the standard library
+ :class:`Stdlib_Queue`.
+ '''
+
+ def __init__(self, maxsize=None):
+ LightQueue.__init__(self, maxsize)
+ self.unfinished_tasks = 0
+ self._cond = Event()
+
+ def _format(self):
+ result = LightQueue._format(self)
+ if self.unfinished_tasks:
+ result += ' tasks=%s _cond=%s' % (self.unfinished_tasks, self._cond)
+ return result
+
+ def _put(self, item):
+ LightQueue._put(self, item)
+ self._put_bookkeeping()
+
+ def _put_bookkeeping(self):
+ self.unfinished_tasks += 1
+ if self._cond.ready():
+ self._cond.reset()
+
+ def task_done(self):
+ '''Indicate that a formerly enqueued task is complete. Used by queue consumer threads.
+ For each :meth:`get <Queue.get>` used to fetch a task, a subsequent call to
+ :meth:`task_done` tells the queue that the processing on the task is complete.
+
+ If a :meth:`join` is currently blocking, it will resume when all items have been processed
+ (meaning that a :meth:`task_done` call was received for every item that had been
+ :meth:`put <Queue.put>` into the queue).
+
+ Raises a :exc:`ValueError` if called more times than there were items placed in the queue.
+ '''
+
+ if self.unfinished_tasks <= 0:
+ raise ValueError('task_done() called too many times')
+ self.unfinished_tasks -= 1
+ if self.unfinished_tasks == 0:
+ self._cond.send(None)
+
+ def join(self):
+ '''Block until all items in the queue have been gotten and processed.
+
+ The count of unfinished tasks goes up whenever an item is added to the queue.
+ The count goes down whenever a consumer thread calls :meth:`task_done` to indicate
+ that the item was retrieved and all work on it is complete. When the count of
+ unfinished tasks drops to zero, :meth:`join` unblocks.
+ '''
+ if self.unfinished_tasks > 0:
+ self._cond.wait()
+
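+# Producer/consumer sketch using task_done()/join() ('handle' and 'items'
+# are hypothetical):
+#
+#     q = Queue()
+#     def worker():
+#         while True:
+#             item = q.get()
+#             handle(item)
+#             q.task_done()
+#     eventlet.spawn(worker)
+#     for item in items:
+#         q.put(item)
+#     q.join()  # returns once every item has been marked done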
+
+class PriorityQueue(Queue):
+ '''A subclass of :class:`Queue` that retrieves entries in priority order (lowest first).
+
+ Entries are typically tuples of the form: ``(priority number, data)``.
+ '''
+
+ def _init(self, maxsize):
+ self.queue = []
+
+ def _put(self, item, heappush=heapq.heappush):
+ heappush(self.queue, item)
+ self._put_bookkeeping()
+
+ def _get(self, heappop=heapq.heappop):
+ return heappop(self.queue)
+
+
+class LifoQueue(Queue):
+ '''A subclass of :class:`Queue` that retrieves most recently added entries first.'''
+
+ def _init(self, maxsize):
+ self.queue = []
+
+ def _put(self, item):
+ self.queue.append(item)
+ self._put_bookkeeping()
+
+ def _get(self):
+ return self.queue.pop()
diff --git a/.venv/Lib/site-packages/eventlet/semaphore.py b/.venv/Lib/site-packages/eventlet/semaphore.py
new file mode 100644
index 0000000..218d01a
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/semaphore.py
@@ -0,0 +1,315 @@
+import collections
+
+import eventlet
+from eventlet import hubs
+
+
+class Semaphore:
+
+ """An unbounded semaphore.
+ Optionally initialize with a resource *count*, then :meth:`acquire` and
+ :meth:`release` resources as needed. Attempting to :meth:`acquire` when
+ *count* is zero suspends the calling greenthread until *count* becomes
+ nonzero again.
+
+ This is API-compatible with :class:`threading.Semaphore`.
+
+ It is a context manager, and thus can be used in a with block::
+
+ sem = Semaphore(2)
+ with sem:
+ do_some_stuff()
+
+ If not specified, *value* defaults to 1.
+
+ It is possible to limit acquire time::
+
+ sem = Semaphore()
+ ok = sem.acquire(timeout=0.1)
+ # True if acquired, False if timed out.
+
+ """
+
+ def __init__(self, value=1):
+ try:
+ value = int(value)
+ except ValueError as e:
+ msg = 'Semaphore() expect value :: int, actual: {} {}'.format(type(value), str(e))
+ raise TypeError(msg)
+ if value < 0:
+ msg = 'Semaphore() expect value >= 0, actual: {}'.format(repr(value))
+ raise ValueError(msg)
+ self.counter = value
+ self._waiters = collections.deque()
+
+ def __repr__(self):
+ params = (self.__class__.__name__, hex(id(self)),
+ self.counter, len(self._waiters))
+ return '<%s at %s c=%s _w[%s]>' % params
+
+ def __str__(self):
+ params = (self.__class__.__name__, self.counter, len(self._waiters))
+ return '<%s c=%s _w[%s]>' % params
+
+ def locked(self):
+ """Returns true if a call to acquire would block.
+ """
+ return self.counter <= 0
+
+ def bounded(self):
+ """Returns False; for consistency with
+ :class:`~eventlet.semaphore.CappedSemaphore`.
+ """
+ return False
+
+ def acquire(self, blocking=True, timeout=None):
+ """Acquire a semaphore.
+
+ When invoked without arguments: if the internal counter is larger than
+ zero on entry, decrement it by one and return immediately. If it is zero
+ on entry, block, waiting until some other thread has called release() to
+ make it larger than zero. This is done with proper interlocking so that
+ if multiple acquire() calls are blocked, release() will wake exactly one
+ of them up. The implementation may pick one at random, so the order in
+ which blocked threads are awakened should not be relied on. There is no
+ return value in this case.
+
+ When invoked with blocking set to true, do the same thing as when called
+ without arguments, and return true.
+
+ When invoked with blocking set to false, do not block. If a call without
+ an argument would block, return false immediately; otherwise, do the
+ same thing as when called without arguments, and return true.
+
+ Timeout value must be strictly positive.
+ """
+ if timeout == -1:
+ timeout = None
+ if timeout is not None and timeout < 0:
+ raise ValueError("timeout value must be strictly positive")
+ if not blocking:
+ if timeout is not None:
+ raise ValueError("can't specify timeout for non-blocking acquire")
+ timeout = 0
+ if not blocking and self.locked():
+ return False
+
+ current_thread = eventlet.getcurrent()
+
+ if self.counter <= 0 or self._waiters:
+ if current_thread not in self._waiters:
+ self._waiters.append(current_thread)
+ try:
+ if timeout is not None:
+ ok = False
+ with eventlet.Timeout(timeout, False):
+ while self.counter <= 0:
+ hubs.get_hub().switch()
+ ok = True
+ if not ok:
+ return False
+ else:
+ # If someone else is already in this wait loop, give them
+ # a chance to get out.
+ while True:
+ hubs.get_hub().switch()
+ if self.counter > 0:
+ break
+ finally:
+ try:
+ self._waiters.remove(current_thread)
+ except ValueError:
+ # Fine if it's already been dropped.
+ pass
+
+ self.counter -= 1
+ return True
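+
+ # A hedged sketch of the semantics above (illustrative only; the timeout
+ # path needs a running hub to switch to):
+ #
+ # sem = Semaphore(1)
+ # assert sem.acquire() is True # counter 1 -> 0
+ # assert sem.acquire(blocking=False) is False # would block, returns now
+ # assert sem.acquire(timeout=0.1) is False # gives up after ~0.1s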
+
+ def __enter__(self):
+ self.acquire()
+
+ def release(self, blocking=True):
+ """Release a semaphore, incrementing the internal counter by one. When
+ it was zero on entry and another thread is waiting for it to become
+ larger than zero again, wake up that thread.
+
+ The *blocking* argument is for consistency with CappedSemaphore and is
+ ignored
+ """
+ self.counter += 1
+ if self._waiters:
+ hubs.get_hub().schedule_call_global(0, self._do_acquire)
+ return True
+
+ def _do_acquire(self):
+ if self._waiters and self.counter > 0:
+ waiter = self._waiters.popleft()
+ waiter.switch()
+
+ def __exit__(self, typ, val, tb):
+ self.release()
+
+ @property
+ def balance(self):
+ """An integer value that represents how many new calls to
+ :meth:`acquire` or :meth:`release` would be needed to get the counter to
+ 0. If it is positive, then its value is the number of acquires that can
+ happen before the next acquire would block. If it is negative, it is
+ the negative of the number of releases that would be required in order
+ to make the counter 0 again (one more release would push the counter to
+ 1 and unblock acquirers). It takes into account how many greenthreads
+ are currently blocking in :meth:`acquire`.
+ """
+ # positive means there are free items
+ # zero means there are no free items but nobody has requested one
+ # negative means there are requests for items, but no items
+ return self.counter - len(self._waiters)
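+
+# An illustrative sketch of *balance* (not part of the class):
+#
+# sem = Semaphore(2)
+# assert sem.balance == 2 # two acquires can succeed immediately
+# sem.acquire(); sem.acquire()
+# assert sem.balance == 0 # no free items, and nobody is waiting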
+
+
+class BoundedSemaphore(Semaphore):
+
+ """A bounded semaphore checks to make sure its current value doesn't exceed
+ its initial value. If it does, ValueError is raised. In most situations
+ semaphores are used to guard resources with limited capacity. If the
+ semaphore is released too many times it's a sign of a bug. If not given,
+ *value* defaults to 1.
+ """
+
+ def __init__(self, value=1):
+ super().__init__(value)
+ self.original_counter = value
+
+ def release(self, blocking=True):
+ """Release a semaphore, incrementing the internal counter by one. If
+ the counter would exceed the initial value, raises ValueError. When
+ it was zero on entry and another thread is waiting for it to become
+ larger than zero again, wake up that thread.
+
+ The *blocking* argument is for consistency with :class:`CappedSemaphore`
+ and is ignored
+ """
+ if self.counter >= self.original_counter:
+ raise ValueError("Semaphore released too many times")
+ return super().release(blocking)
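+
+# Illustrative sketch of the over-release check (not part of the class):
+#
+# bsem = BoundedSemaphore(1)
+# bsem.acquire()
+# bsem.release() # counter back to the initial value
+# bsem.release() # raises ValueError: released too many times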
+
+
+class CappedSemaphore:
+
+ """A blockingly bounded semaphore.
+
+ Optionally initialize with a resource *count*, then :meth:`acquire` and
+ :meth:`release` resources as needed. Attempting to :meth:`acquire` when
+ *count* is zero suspends the calling greenthread until count becomes nonzero
+ again. Attempting to :meth:`release` after *count* has reached *limit*
+ suspends the calling greenthread until *count* becomes less than *limit*
+ again.
+
+ This has the same API as :class:`threading.Semaphore`, though its
+ semantics and behavior differ subtly due to the upper limit on calls
+ to :meth:`release`. It is **not** compatible with
+ :class:`threading.BoundedSemaphore` because it blocks when reaching *limit*
+ instead of raising a ValueError.
+
+ It is a context manager, and thus can be used in a with block::
+
+ sem = CappedSemaphore(2)
+ with sem:
+ do_some_stuff()
+ """
+
+ def __init__(self, count, limit):
+ if count < 0:
+ raise ValueError("CappedSemaphore must be initialized with a "
+ "positive number, got %s" % count)
+ if count > limit:
+ # accidentally, this also catches the case when limit is None
+ raise ValueError("'count' cannot be more than 'limit'")
+ self.lower_bound = Semaphore(count)
+ self.upper_bound = Semaphore(limit - count)
+
+ def __repr__(self):
+ params = (self.__class__.__name__, hex(id(self)),
+ self.balance, self.lower_bound, self.upper_bound)
+ return '<%s at %s b=%s l=%s u=%s>' % params
+
+ def __str__(self):
+ params = (self.__class__.__name__, self.balance,
+ self.lower_bound, self.upper_bound)
+ return '<%s b=%s l=%s u=%s>' % params
+
+ def locked(self):
+ """Returns true if a call to acquire would block.
+ """
+ return self.lower_bound.locked()
+
+ def bounded(self):
+ """Returns true if a call to release would block.
+ """
+ return self.upper_bound.locked()
+
+ def acquire(self, blocking=True):
+ """Acquire a semaphore.
+
+ When invoked without arguments: if the internal counter is larger than
+ zero on entry, decrement it by one and return immediately. If it is zero
+ on entry, block, waiting until some other thread has called release() to
+ make it larger than zero. This is done with proper interlocking so that
+ if multiple acquire() calls are blocked, release() will wake exactly one
+ of them up. The implementation may pick one at random, so the order in
+ which blocked threads are awakened should not be relied on. There is no
+ return value in this case.
+
+ When invoked with blocking set to true, do the same thing as when called
+ without arguments, and return true.
+
+ When invoked with blocking set to false, do not block. If a call without
+ an argument would block, return false immediately; otherwise, do the
+ same thing as when called without arguments, and return true.
+ """
+ if not blocking and self.locked():
+ return False
+ self.upper_bound.release()
+ try:
+ return self.lower_bound.acquire()
+ except:
+ self.upper_bound.counter -= 1
+ # using counter directly means that it can be less than zero.
+ # however I certainly don't need to wait here and I don't seem to have
+ # a need to care about such inconsistency
+ raise
+
+ def __enter__(self):
+ self.acquire()
+
+ def release(self, blocking=True):
+ """Release a semaphore. In this class, this behaves very much like
+ an :meth:`acquire` but in the opposite direction.
+
+ Imagine the docs of :meth:`acquire` here, but with every direction
+ reversed. When calling this method, it will block if the internal
+ counter is greater than or equal to *limit*.
+ """
+ if not blocking and self.bounded():
+ return False
+ self.lower_bound.release()
+ try:
+ return self.upper_bound.acquire()
+ except:
+ self.lower_bound.counter -= 1
+ raise
+
+ def __exit__(self, typ, val, tb):
+ self.release()
+
+ @property
+ def balance(self):
+ """An integer value that represents how many new calls to
+ :meth:`acquire` or :meth:`release` would be needed to get the counter to
+ 0. If it is positive, then its value is the number of acquires that can
+ happen before the next acquire would block. If it is negative, it is
+ the negative of the number of releases that would be required in order
+ to make the counter 0 again (one more release would push the counter to
+ 1 and unblock acquirers). It takes into account how many greenthreads
+ are currently blocking in :meth:`acquire` and :meth:`release`.
+ """
+ return self.lower_bound.balance - self.upper_bound.balance
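+
+
+# A minimal sketch of the blocking upper bound (illustrative only):
+#
+# sem = CappedSemaphore(0, limit=1)
+# sem.release() # count 0 -> 1
+# # a second release() here would block: count is already at *limit*
+# sem.acquire() # count 1 -> 0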
diff --git a/.venv/Lib/site-packages/eventlet/support/__init__.py b/.venv/Lib/site-packages/eventlet/support/__init__.py
new file mode 100644
index 0000000..b1c1607
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/support/__init__.py
@@ -0,0 +1,69 @@
+import inspect
+import functools
+import sys
+import warnings
+
+from eventlet.support import greenlets
+
+
+_MISSING = object()
+
+
+def get_errno(exc):
+ """ Get the error code out of socket.error objects.
+ socket.error in <2.5 does not have errno attribute
+ socket.error in 3.x does not allow indexing access
+ e.args[0] works for all.
+ There are cases when args[0] is not errno.
+ i.e. http://bugs.python.org/issue6471
+ Maybe there are cases when errno is set, but it is not the first argument?
+ """
+
+ try:
+ if exc.errno is not None:
+ return exc.errno
+ except AttributeError:
+ pass
+ try:
+ return exc.args[0]
+ except IndexError:
+ return None
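+
+# Illustrative sketch (the path below is hypothetical):
+#
+# import errno
+# try:
+# open('/hopefully-missing-path')
+# except OSError as exc:
+# assert get_errno(exc) == errno.ENOENT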
+
+
+if sys.version_info[0] < 3:
+ def bytes_to_str(b, encoding='ascii'):
+ return b
+else:
+ def bytes_to_str(b, encoding='ascii'):
+ return b.decode(encoding)
+
+PY33 = sys.version_info[:2] == (3, 3)
+
+
+def wrap_deprecated(old, new):
+ def _resolve(s):
+ return 'eventlet.'+s if '.' not in s else s
+ msg = '''\
+{old} is deprecated and will be removed in next version. Use {new} instead.
+Autoupgrade: fgrep -rl '{old}' . |xargs -t sed --in-place='' -e 's/{old}/{new}/'
+'''.format(old=_resolve(old), new=_resolve(new))
+
+ def wrapper(base):
+ klass = None
+ if inspect.isclass(base):
+ class klass(base):
+ pass
+ klass.__name__ = base.__name__
+ klass.__module__ = base.__module__
+
+ @functools.wraps(base)
+ def wrapped(*a, **kw):
+ warnings.warn(msg, DeprecationWarning, stacklevel=5)
+ return base(*a, **kw)
+
+ if klass is not None:
+ klass.__init__ = wrapped
+ return klass
+
+ return wrapped
+ return wrapper
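+
+
+# A hedged usage sketch; the dotted names below are hypothetical, not a
+# statement about eventlet's real deprecation map:
+#
+# old_api = wrap_deprecated('old_mod.func', 'new_mod.func')(new_func)
+# old_api() # emits a DeprecationWarning, then calls new_func()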
diff --git a/.venv/Lib/site-packages/eventlet/support/__pycache__/__init__.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/support/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..2b64bb6
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/support/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/support/__pycache__/greendns.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/support/__pycache__/greendns.cpython-312.pyc
new file mode 100644
index 0000000..1fe2140
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/support/__pycache__/greendns.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/support/__pycache__/greenlets.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/support/__pycache__/greenlets.cpython-312.pyc
new file mode 100644
index 0000000..6e9fe60
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/support/__pycache__/greenlets.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/support/__pycache__/psycopg2_patcher.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/support/__pycache__/psycopg2_patcher.cpython-312.pyc
new file mode 100644
index 0000000..5bfd621
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/support/__pycache__/psycopg2_patcher.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/support/__pycache__/pylib.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/support/__pycache__/pylib.cpython-312.pyc
new file mode 100644
index 0000000..3bb2cac
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/support/__pycache__/pylib.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/support/__pycache__/stacklesspypys.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/support/__pycache__/stacklesspypys.cpython-312.pyc
new file mode 100644
index 0000000..59c58b9
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/support/__pycache__/stacklesspypys.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/support/__pycache__/stacklesss.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/support/__pycache__/stacklesss.cpython-312.pyc
new file mode 100644
index 0000000..18c33d3
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/support/__pycache__/stacklesss.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/support/greendns.py b/.venv/Lib/site-packages/eventlet/support/greendns.py
new file mode 100644
index 0000000..365664f
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/support/greendns.py
@@ -0,0 +1,959 @@
+'''greendns - non-blocking DNS support for Eventlet
+'''
+
+# Portions of this code taken from the gogreen project:
+# http://github.com/slideinc/gogreen
+#
+# Copyright (c) 2005-2010 Slide, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the author nor the names of other
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import re
+import struct
+import sys
+
+import eventlet
+from eventlet import patcher
+from eventlet.green import _socket_nodns
+from eventlet.green import os
+from eventlet.green import time
+from eventlet.green import select
+from eventlet.green import ssl
+
+
+def import_patched(module_name):
+ # Import cycle note: it's crucial to use _socket_nodns here because
+ # regular eventlet.green.socket imports *this* module and if we imported
+ # it back we'd end with an import cycle (socket -> greendns -> socket).
+ # We break this import cycle by providing a restricted socket module.
+ modules = {
+ 'select': select,
+ 'time': time,
+ 'os': os,
+ 'socket': _socket_nodns,
+ 'ssl': ssl,
+ }
+ return patcher.import_patched(module_name, **modules)
+
+
+dns = import_patched('dns')
+
+ # Handle rdtypes separately; we need it fully available as we patch the rest
+dns.rdtypes = import_patched('dns.rdtypes')
+dns.rdtypes.__all__.extend(['dnskeybase', 'dsbase', 'txtbase'])
+for pkg in dns.rdtypes.__all__:
+ setattr(dns.rdtypes, pkg, import_patched('dns.rdtypes.' + pkg))
+for pkg in dns.rdtypes.IN.__all__:
+ setattr(dns.rdtypes.IN, pkg, import_patched('dns.rdtypes.IN.' + pkg))
+for pkg in dns.rdtypes.ANY.__all__:
+ setattr(dns.rdtypes.ANY, pkg, import_patched('dns.rdtypes.ANY.' + pkg))
+
+for pkg in dns.__all__:
+ if pkg == 'rdtypes':
+ continue
+ setattr(dns, pkg, import_patched('dns.' + pkg))
+del import_patched
+
+
+socket = _socket_nodns
+
+DNS_QUERY_TIMEOUT = 10.0
+HOSTS_TTL = 10.0
+
+# NOTE(victor): do not use EAI_*_ERROR instances for raising errors in python3, which will cause a memory leak.
+EAI_EAGAIN_ERROR = socket.gaierror(socket.EAI_AGAIN, 'Lookup timed out')
+EAI_NONAME_ERROR = socket.gaierror(socket.EAI_NONAME, 'Name or service not known')
+# EAI_NODATA was removed from RFC3493, it's now replaced with EAI_NONAME
+# socket.EAI_NODATA is not defined on FreeBSD, probably on some other platforms too.
+# https://lists.freebsd.org/pipermail/freebsd-ports/2003-October/005757.html
+EAI_NODATA_ERROR = EAI_NONAME_ERROR
+if (os.environ.get('EVENTLET_DEPRECATED_EAI_NODATA', '').lower() in ('1', 'y', 'yes')
+ and hasattr(socket, 'EAI_NODATA')):
+ EAI_NODATA_ERROR = socket.gaierror(socket.EAI_NODATA, 'No address associated with hostname')
+
+
+def _raise_new_error(error_instance):
+ raise error_instance.__class__(*error_instance.args)
+
+
+def is_ipv4_addr(host):
+ """Return True if host is a valid IPv4 address"""
+ if not isinstance(host, str):
+ return False
+ try:
+ dns.ipv4.inet_aton(host)
+ except dns.exception.SyntaxError:
+ return False
+ else:
+ return True
+
+
+def is_ipv6_addr(host):
+ """Return True if host is a valid IPv6 address"""
+ if not isinstance(host, str):
+ return False
+ host = host.split('%', 1)[0]
+ try:
+ dns.ipv6.inet_aton(host)
+ except dns.exception.SyntaxError:
+ return False
+ else:
+ return True
+
+
+def is_ip_addr(host):
+ """Return True if host is a valid IPv4 or IPv6 address"""
+ return is_ipv4_addr(host) or is_ipv6_addr(host)
+
+
+# NOTE(ralonsoh): in dnspython v2.0.0, "_compute_expiration" was replaced
+# by "_compute_times".
+if hasattr(dns.query, '_compute_expiration'):
+ def compute_expiration(query, timeout):
+ return query._compute_expiration(timeout)
+else:
+ def compute_expiration(query, timeout):
+ return query._compute_times(timeout)[1]
+
+
+class HostsAnswer(dns.resolver.Answer):
+ """Answer class for HostsResolver object"""
+
+ def __init__(self, qname, rdtype, rdclass, rrset, raise_on_no_answer=True):
+ """Create a new answer
+
+ :qname: A dns.name.Name instance of the query name
+ :rdtype: The rdatatype of the query
+ :rdclass: The rdataclass of the query
+ :rrset: The dns.rrset.RRset with the response, must have ttl attribute
+ :raise_on_no_answer: Whether to raise dns.resolver.NoAnswer if no
+ answer.
+ """
+ self.response = None
+ self.qname = qname
+ self.rdtype = rdtype
+ self.rdclass = rdclass
+ self.canonical_name = qname
+ if not rrset and raise_on_no_answer:
+ raise dns.resolver.NoAnswer()
+ self.rrset = rrset
+ self.expiration = (time.time() +
+ rrset.ttl if hasattr(rrset, 'ttl') else 0)
+
+
+class HostsResolver:
+ """Class to parse the hosts file
+
+ Attributes
+ ----------
+
+ :fname: The filename of the hosts file in use.
+ :interval: The time between checking for hosts file modification
+ """
+
+ LINES_RE = re.compile(r"""
+ \s* # Leading space
+ ([^\r\n#]*?) # The actual match, non-greedy so as not to include trailing space
+ \s* # Trailing space
+ (?:[#][^\r\n]+)? # Comments
+ (?:$|[\r\n]+) # EOF or newline
+ """, re.VERBOSE)
+
+ def __init__(self, fname=None, interval=HOSTS_TTL):
+ self._v4 = {} # name -> ipv4
+ self._v6 = {} # name -> ipv6
+ self._aliases = {} # name -> canonical_name
+ self.interval = interval
+ self.fname = fname
+ if fname is None:
+ if os.name == 'posix':
+ self.fname = '/etc/hosts'
+ elif os.name == 'nt':
+ self.fname = os.path.expandvars(
+ r'%SystemRoot%\system32\drivers\etc\hosts')
+ self._last_load = 0
+ if self.fname:
+ self._load()
+
+ def _readlines(self):
+ """Read the contents of the hosts file
+
+ Return list of lines, comment lines and empty lines are
+ excluded.
+
+ Note that this performs disk I/O so can be blocking.
+ """
+ try:
+ with open(self.fname, 'rb') as fp:
+ fdata = fp.read()
+ except OSError:
+ return []
+
+ udata = fdata.decode(errors='ignore')
+
+ return filter(None, self.LINES_RE.findall(udata))
+
+ def _load(self):
+ """Load hosts file
+
+ This will unconditionally (re)load the data from the hosts
+ file.
+ """
+ lines = self._readlines()
+ self._v4.clear()
+ self._v6.clear()
+ self._aliases.clear()
+ for line in lines:
+ parts = line.split()
+ if len(parts) < 2:
+ continue
+ ip = parts.pop(0)
+ if is_ipv4_addr(ip):
+ ipmap = self._v4
+ elif is_ipv6_addr(ip):
+ if ip.startswith('fe80'):
+ # Do not use link-local addresses, OSX stores these here
+ continue
+ ipmap = self._v6
+ else:
+ continue
+ cname = parts.pop(0).lower()
+ ipmap[cname] = ip
+ for alias in parts:
+ alias = alias.lower()
+ ipmap[alias] = ip
+ self._aliases[alias] = cname
+ self._last_load = time.time()
+
+ def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
+ tcp=False, source=None, raise_on_no_answer=True):
+ """Query the hosts file
+
+ The known rdtypes are dns.rdatatype.A, dns.rdatatype.AAAA and
+ dns.rdatatype.CNAME.
+
+ The ``rdclass`` parameter must be dns.rdataclass.IN while the
+ ``tcp`` and ``source`` parameters are ignored.
+
+ Return a HostAnswer instance or raise a dns.resolver.NoAnswer
+ exception.
+ """
+ now = time.time()
+ if self._last_load + self.interval < now:
+ self._load()
+ rdclass = dns.rdataclass.IN
+ if isinstance(qname, str):
+ name = qname
+ qname = dns.name.from_text(qname)
+ elif isinstance(qname, bytes):
+ name = qname.decode("ascii")
+ qname = dns.name.from_text(qname)
+ else:
+ name = str(qname)
+ name = name.lower()
+ rrset = dns.rrset.RRset(qname, rdclass, rdtype)
+ rrset.ttl = self._last_load + self.interval - now
+ if rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.A:
+ addr = self._v4.get(name)
+ if not addr and qname.is_absolute():
+ addr = self._v4.get(name[:-1])
+ if addr:
+ rrset.add(dns.rdtypes.IN.A.A(rdclass, rdtype, addr))
+ elif rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.AAAA:
+ addr = self._v6.get(name)
+ if not addr and qname.is_absolute():
+ addr = self._v6.get(name[:-1])
+ if addr:
+ rrset.add(dns.rdtypes.IN.AAAA.AAAA(rdclass, rdtype, addr))
+ elif rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.CNAME:
+ cname = self._aliases.get(name)
+ if not cname and qname.is_absolute():
+ cname = self._aliases.get(name[:-1])
+ if cname:
+ rrset.add(dns.rdtypes.ANY.CNAME.CNAME(
+ rdclass, rdtype, dns.name.from_text(cname)))
+ return HostsAnswer(qname, rdtype, rdclass, rrset, raise_on_no_answer)
+
+ def getaliases(self, hostname):
+ """Return a list of all the aliases of a given cname"""
+ # Due to the way we store aliases this is a bit inefficient; this
+ # clearly was an afterthought. But it is only used by
+ # gethostbyname_ex so it's probably fine.
+ aliases = []
+ if hostname in self._aliases:
+ cannon = self._aliases[hostname]
+ else:
+ cannon = hostname
+ aliases.append(cannon)
+ for alias, cname in self._aliases.items():
+ if cannon == cname:
+ aliases.append(alias)
+ aliases.remove(hostname)
+ return aliases
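+
+
+# Illustrative sketch (assumes 'localhost' appears in the hosts file):
+#
+# hr = HostsResolver()
+# ans = hr.query('localhost') # rdtype defaults to dns.rdatatype.A
+# addrs = [rr.address for rr in ans.rrset] # e.g. ['127.0.0.1']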
+
+
+class ResolverProxy:
+ """Resolver class which can also use /etc/hosts
+
+ Initialise with a HostsResolver instance in order for it to also
+ use the hosts file.
+ """
+
+ def __init__(self, hosts_resolver=None, filename='/etc/resolv.conf'):
+ """Initialise the resolver proxy
+
+ :param hosts_resolver: An instance of HostsResolver to use.
+
+ :param filename: The filename containing the resolver
+ configuration. The default value is correct for both UNIX
+ and Windows, on Windows it will result in the configuration
+ being read from the Windows registry.
+ """
+ self._hosts = hosts_resolver
+ self._filename = filename
+ # NOTE(dtantsur): we cannot create a resolver here since this code is
+ # executed on eventlet import. In an environment without DNS, creating
+ # a Resolver will fail making eventlet unusable at all. See
+ # https://github.com/eventlet/eventlet/issues/736 for details.
+ self._cached_resolver = None
+
+ @property
+ def _resolver(self):
+ if self._cached_resolver is None:
+ self.clear()
+ return self._cached_resolver
+
+ @_resolver.setter
+ def _resolver(self, value):
+ self._cached_resolver = value
+
+ def clear(self):
+ self._resolver = dns.resolver.Resolver(filename=self._filename)
+ self._resolver.cache = dns.resolver.LRUCache()
+
+ def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
+ tcp=False, source=None, raise_on_no_answer=True,
+ _hosts_rdtypes=(dns.rdatatype.A, dns.rdatatype.AAAA),
+ use_network=True):
+ """Query the resolver, using /etc/hosts if enabled.
+
+ Behavior:
+ 1. if hosts is enabled and contains an answer, return it now
+ 2. query nameservers for qname if use_network is True
+ 3. if qname did not contain dots, pretend it was top-level domain,
+ query "foobar." and append to previous result
+ """
+ result = [None, None, 0]
+
+ if qname is None:
+ qname = '0.0.0.0'
+ if isinstance(qname, str) or isinstance(qname, bytes):
+ qname = dns.name.from_text(qname, None)
+
+ def step(fun, *args, **kwargs):
+ try:
+ a = fun(*args, **kwargs)
+ except Exception as e:
+ result[1] = e
+ return False
+ if a.rrset is not None and len(a.rrset):
+ if result[0] is None:
+ result[0] = a
+ else:
+ result[0].rrset.union_update(a.rrset)
+ result[2] += len(a.rrset)
+ return True
+
+ def end():
+ if result[0] is not None:
+ if raise_on_no_answer and result[2] == 0:
+ raise dns.resolver.NoAnswer
+ return result[0]
+ if result[1] is not None:
+ if raise_on_no_answer or not isinstance(result[1], dns.resolver.NoAnswer):
+ raise result[1]
+ raise dns.resolver.NXDOMAIN(qnames=(qname,))
+
+ if (self._hosts and (rdclass == dns.rdataclass.IN) and (rdtype in _hosts_rdtypes)):
+ if step(self._hosts.query, qname, rdtype, raise_on_no_answer=False):
+ if (result[0] is not None) or (result[1] is not None) or (not use_network):
+ return end()
+
+ # Main query
+ step(self._resolver.query, qname, rdtype, rdclass, tcp, source, raise_on_no_answer=False)
+
+ # `resolv.conf` docs say unqualified names must resolve from search (or local) domain.
+ # However, common OS `getaddrinfo()` implementations append trailing dot (e.g. `db -> db.`)
+ # and ask nameservers, as if top-level domain was queried.
+ # This step follows established practice.
+ # https://github.com/nameko/nameko/issues/392
+ # https://github.com/eventlet/eventlet/issues/363
+ if len(qname) == 1:
+ step(self._resolver.query, qname.concatenate(dns.name.root),
+ rdtype, rdclass, tcp, source, raise_on_no_answer=False)
+
+ return end()
+
+ def getaliases(self, hostname):
+ """Return a list of all the aliases of a given hostname"""
+ if self._hosts:
+ aliases = self._hosts.getaliases(hostname)
+ else:
+ aliases = []
+ while True:
+ try:
+ ans = self._resolver.query(hostname, dns.rdatatype.CNAME)
+ except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
+ break
+ else:
+ aliases.extend(str(rr.target) for rr in ans.rrset)
+ hostname = ans[0].target
+ return aliases
+
+
+resolver = ResolverProxy(hosts_resolver=HostsResolver())
+
+
+def resolve(name, family=socket.AF_INET, raises=True, _proxy=None,
+ use_network=True):
+ """Resolve a name for a given family using the global resolver proxy.
+
+ This method is called by the global getaddrinfo() function. If use_network
+ is False, only resolution via hosts file will be performed.
+
+ Return a dns.resolver.Answer instance. If there is no answer, its
+ rrset will be empty.
+ """
+ if family == socket.AF_INET:
+ rdtype = dns.rdatatype.A
+ elif family == socket.AF_INET6:
+ rdtype = dns.rdatatype.AAAA
+ else:
+ raise socket.gaierror(socket.EAI_FAMILY,
+ 'Address family not supported')
+
+ if _proxy is None:
+ _proxy = resolver
+ try:
+ try:
+ return _proxy.query(name, rdtype, raise_on_no_answer=raises,
+ use_network=use_network)
+ except dns.resolver.NXDOMAIN:
+ if not raises:
+ return HostsAnswer(dns.name.Name(name),
+ rdtype, dns.rdataclass.IN, None, False)
+ raise
+ except dns.exception.Timeout:
+ _raise_new_error(EAI_EAGAIN_ERROR)
+ except dns.exception.DNSException:
+ _raise_new_error(EAI_NODATA_ERROR)
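+
+
+# A minimal sketch (illustrative; assumes a reachable resolver and that
+# 'example.com' resolves):
+#
+# ans = resolve('example.com', socket.AF_INET, raises=False)
+# addrs = [rr.address for rr in ans.rrset] if ans.rrset else []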
+
+
+def resolve_cname(host):
+ """Return the canonical name of a hostname"""
+ try:
+ ans = resolver.query(host, dns.rdatatype.CNAME)
+ except dns.resolver.NoAnswer:
+ return host
+ except dns.exception.Timeout:
+ _raise_new_error(EAI_EAGAIN_ERROR)
+ except dns.exception.DNSException:
+ _raise_new_error(EAI_NODATA_ERROR)
+ else:
+ return str(ans[0].target)
+
+
+def getaliases(host):
+ """Return a list of for aliases for the given hostname
+
+ This method does translate the dnspython exceptions into
+ socket.gaierror exceptions. If no aliases are available an empty
+ list will be returned.
+ """
+ try:
+ return resolver.getaliases(host)
+ except dns.exception.Timeout:
+ _raise_new_error(EAI_EAGAIN_ERROR)
+ except dns.exception.DNSException:
+ _raise_new_error(EAI_NODATA_ERROR)
+
+
+def _getaddrinfo_lookup(host, family, flags):
+ """Resolve a hostname to a list of addresses
+
+ Helper function for getaddrinfo.
+ """
+ if flags & socket.AI_NUMERICHOST:
+ _raise_new_error(EAI_NONAME_ERROR)
+ addrs = []
+ if family == socket.AF_UNSPEC:
+ err = None
+ for use_network in [False, True]:
+ for qfamily in [socket.AF_INET6, socket.AF_INET]:
+ try:
+ answer = resolve(host, qfamily, False, use_network=use_network)
+ except socket.gaierror as e:
+ if e.errno not in (socket.EAI_AGAIN, EAI_NONAME_ERROR.errno, EAI_NODATA_ERROR.errno):
+ raise
+ err = e
+ else:
+ if answer.rrset:
+ addrs.extend(rr.address for rr in answer.rrset)
+ if addrs:
+ break
+ if err is not None and not addrs:
+ raise err
+ elif family == socket.AF_INET6 and flags & socket.AI_V4MAPPED:
+ answer = resolve(host, socket.AF_INET6, False)
+ if answer.rrset:
+ addrs = [rr.address for rr in answer.rrset]
+ if not addrs or flags & socket.AI_ALL:
+ answer = resolve(host, socket.AF_INET, False)
+ if answer.rrset:
+ addrs = ['::ffff:' + rr.address for rr in answer.rrset]
+ else:
+ answer = resolve(host, family, False)
+ if answer.rrset:
+ addrs = [rr.address for rr in answer.rrset]
+ return str(answer.qname), addrs
+
+
+def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
+ """Replacement for Python's socket.getaddrinfo
+
+ This does the A and AAAA lookups asynchronously after which it
+ calls the OS' getaddrinfo(3) using the AI_NUMERICHOST flag. This
+ flag ensures getaddrinfo(3) does not use the network itself and
+ allows us to respect all the other arguments like the native OS.
+ """
+ if isinstance(host, str):
+ host = host.encode('idna').decode('ascii')
+ elif isinstance(host, bytes):
+ host = host.decode("ascii")
+ if host is not None and not is_ip_addr(host):
+ qname, addrs = _getaddrinfo_lookup(host, family, flags)
+ else:
+ qname = host
+ addrs = [host]
+ aiflags = (flags | socket.AI_NUMERICHOST) & (0xffff ^ socket.AI_CANONNAME)
+ res = []
+ err = None
+ for addr in addrs:
+ try:
+ ai = socket.getaddrinfo(addr, port, family,
+ type, proto, aiflags)
+ except OSError as e:
+ if flags & socket.AI_ADDRCONFIG:
+ err = e
+ continue
+ raise
+ res.extend(ai)
+ if not res:
+ if err:
+ raise err
+ raise socket.gaierror(socket.EAI_NONAME, 'No address found')
+ if flags & socket.AI_CANONNAME:
+ if not is_ip_addr(qname):
+ qname = resolve_cname(qname).encode('ascii').decode('idna')
+ ai = res[0]
+ res[0] = (ai[0], ai[1], ai[2], qname, ai[4])
+ return res
+
+
+def gethostbyname(hostname):
+ """Replacement for Python's socket.gethostbyname"""
+ if is_ipv4_addr(hostname):
+ return hostname
+ rrset = resolve(hostname)
+ return rrset[0].address
+
+
+def gethostbyname_ex(hostname):
+ """Replacement for Python's socket.gethostbyname_ex"""
+ if is_ipv4_addr(hostname):
+ return (hostname, [], [hostname])
+ ans = resolve(hostname)
+ aliases = getaliases(hostname)
+ addrs = [rr.address for rr in ans.rrset]
+ qname = str(ans.qname)
+ if qname[-1] == '.':
+ qname = qname[:-1]
+ return (qname, aliases, addrs)
+
+
+def getnameinfo(sockaddr, flags):
+ """Replacement for Python's socket.getnameinfo.
+
+ Currently only supports IPv4.
+ """
+ try:
+ host, port = sockaddr
+ except (ValueError, TypeError):
+ if not isinstance(sockaddr, tuple):
+ del sockaddr # to pass a stdlib test that is
+ # hyper-careful about reference counts
+ raise TypeError('getnameinfo() argument 1 must be a tuple')
+ else:
+ # must be ipv6 sockaddr, pretending we don't know how to resolve it
+ _raise_new_error(EAI_NONAME_ERROR)
+
+ if (flags & socket.NI_NAMEREQD) and (flags & socket.NI_NUMERICHOST):
+ # Conflicting flags. Punt.
+ _raise_new_error(EAI_NONAME_ERROR)
+
+ if is_ipv4_addr(host):
+ try:
+ rrset = resolver.query(
+ dns.reversename.from_address(host), dns.rdatatype.PTR)
+ if len(rrset) > 1:
+ raise OSError('sockaddr resolved to multiple addresses')
+ host = rrset[0].target.to_text(omit_final_dot=True)
+ except dns.exception.Timeout:
+ if flags & socket.NI_NAMEREQD:
+ _raise_new_error(EAI_EAGAIN_ERROR)
+ except dns.exception.DNSException:
+ if flags & socket.NI_NAMEREQD:
+ _raise_new_error(EAI_NONAME_ERROR)
+ else:
+ try:
+ rrset = resolver.query(host)
+ if len(rrset) > 1:
+ raise OSError('sockaddr resolved to multiple addresses')
+ if flags & socket.NI_NUMERICHOST:
+ host = rrset[0].address
+ except dns.exception.Timeout:
+ _raise_new_error(EAI_EAGAIN_ERROR)
+ except dns.exception.DNSException:
+ raise socket.gaierror(
+ socket.EAI_NODATA, 'No address associated with hostname')
+
+ if not (flags & socket.NI_NUMERICSERV):
+ proto = (flags & socket.NI_DGRAM) and 'udp' or 'tcp'
+ port = socket.getservbyport(port, proto)
+
+ return (host, port)
+
+
+def _net_read(sock, count, expiration):
+ """coro friendly replacement for dns.query._net_read
+ Read the specified number of bytes from sock. Keep trying until we
+ either get the desired amount, or we hit EOF.
+ A Timeout exception will be raised if the operation is not completed
+ by the expiration time.
+ """
+ s = bytearray()
+ while count > 0:
+ try:
+ n = sock.recv(count)
+ except socket.timeout:
+ # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
+ if expiration - time.time() <= 0.0:
+ raise dns.exception.Timeout
+ eventlet.sleep(0.01)
+ continue
+ if n == b'':
+ raise EOFError
+ count = count - len(n)
+ s += n
+ return s
+
+
+def _net_write(sock, data, expiration):
+ """coro friendly replacement for dns.query._net_write
+ Write the specified data to the socket.
+ A Timeout exception will be raised if the operation is not completed
+ by the expiration time.
+ """
+ current = 0
+ l = len(data)
+ while current < l:
+ try:
+ current += sock.send(data[current:])
+ except socket.timeout:
+ # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
+ if expiration - time.time() <= 0.0:
+ raise dns.exception.Timeout
+
+
+# Test if raise_on_truncation is an argument we should handle.
+# It was newly added in dnspython 2.0
+try:
+ dns.message.from_wire("", raise_on_truncation=True)
+except dns.message.ShortHeader:
+ _handle_raise_on_truncation = True
+except TypeError:
+ # Argument error, there is no argument "raise_on_truncation"
+ _handle_raise_on_truncation = False
+
+
+def udp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53,
+ af=None, source=None, source_port=0, ignore_unexpected=False,
+ one_rr_per_rrset=False, ignore_trailing=False,
+ raise_on_truncation=False, sock=None, ignore_errors=False):
+ """coro friendly replacement for dns.query.udp
+ Return the response obtained after sending a query via UDP.
+
+ @param q: the query
+ @type q: dns.message.Message
+ @param where: where to send the message
+ @type where: string containing an IPv4 or IPv6 address
+ @param timeout: The number of seconds to wait before the query times out.
+ If None, wait forever.
+ @type timeout: float
+ @param port: The port to which to send the message. The default is 53.
+ @type port: int
+ @param af: the address family to use. The default is None, which
+ causes the address family to be inferred from the form of where.
+ If the inference attempt fails, AF_INET is used.
+ @type af: int
+ @rtype: dns.message.Message object
+ @param source: source address. The default is the IPv4 wildcard address.
+ @type source: string
+ @param source_port: The port from which to send the message.
+ The default is 0.
+ @type source_port: int
+ @param ignore_unexpected: If True, ignore responses from unexpected
+ sources. The default is False.
+ @type ignore_unexpected: bool
+ @param one_rr_per_rrset: If True, put each RR into its own
+ RRset.
+ @type one_rr_per_rrset: bool
+ @param ignore_trailing: If True, ignore trailing
+ junk at end of the received message.
+ @type ignore_trailing: bool
+ @param raise_on_truncation: If True, raise an exception if
+ the TC bit is set.
+ @type raise_on_truncation: bool
+ @param sock: the socket to use for the
+ query. If None, the default, a socket is created. Note that
+ if a socket is provided, it must be a nonblocking datagram socket,
+ and the source and source_port are ignored.
+ @type sock: socket.socket | None
+ @param ignore_errors: if various format errors or response mismatches occur,
+ continue listening.
+ @type ignore_errors: bool"""
+
+ wire = q.to_wire()
+ if af is None:
+ try:
+ af = dns.inet.af_for_address(where)
+ except:
+ af = dns.inet.AF_INET
+ if af == dns.inet.AF_INET:
+ destination = (where, port)
+ if source is not None:
+ source = (source, source_port)
+ elif af == dns.inet.AF_INET6:
+ # Purge any stray zeroes in source address. When doing the tuple comparison
+ # below, we need to always ensure both our target and where we receive replies
+ # from are compared with all zeroes removed so that we don't erroneously fail.
+ # e.g. ('00::1', 53, 0, 0) != ('::1', 53, 0, 0)
+ where_trunc = dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(where))
+ destination = (where_trunc, port, 0, 0)
+ if source is not None:
+ source = (source, source_port, 0, 0)
+
+ if sock:
+ s = sock
+ else:
+ s = socket.socket(af, socket.SOCK_DGRAM)
+ s.settimeout(timeout)
+ try:
+ expiration = compute_expiration(dns.query, timeout)
+ if source is not None:
+ s.bind(source)
+ while True:
+ try:
+ s.sendto(wire, destination)
+ break
+ except socket.timeout:
+ # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
+ if expiration - time.time() <= 0.0:
+ raise dns.exception.Timeout
+ eventlet.sleep(0.01)
+ continue
+
+ tried = False
+ while True:
+ # If we've tried to receive at least once, check to see if our
+ # timer expired
+ if tried and (expiration - time.time() <= 0.0):
+ raise dns.exception.Timeout
+ # Sleep if we are retrying the operation due to a bad source
+ # address or a socket timeout.
+ if tried:
+ eventlet.sleep(0.01)
+ tried = True
+
+ try:
+ (wire, from_address) = s.recvfrom(65535)
+ except socket.timeout:
+ # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
+ continue
+ if dns.inet.af_for_address(from_address[0]) == dns.inet.AF_INET6:
+ # Purge all possible zeroes for ipv6 to match above logic
+ addr = from_address[0]
+ addr = dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(addr))
+ from_address = (addr, from_address[1], from_address[2], from_address[3])
+ if from_address != destination:
+ if ignore_unexpected:
+ continue
+ else:
+ raise dns.query.UnexpectedSource(
+ 'got a response from %s instead of %s'
+ % (from_address, destination))
+ try:
+ if _handle_raise_on_truncation:
+ r = dns.message.from_wire(wire,
+ keyring=q.keyring,
+ request_mac=q.mac,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ raise_on_truncation=raise_on_truncation)
+ else:
+ r = dns.message.from_wire(wire,
+ keyring=q.keyring,
+ request_mac=q.mac,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing)
+ if not q.is_response(r):
+ raise dns.query.BadResponse()
+ break
+ except dns.message.Truncated as e:
+ if ignore_errors and not q.is_response(e.message()):
+ continue
+ else:
+ raise
+ except Exception:
+ if ignore_errors:
+ continue
+ else:
+ raise
+ finally:
+ s.close()
+
+ return r
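+
+
+# Illustrative sketch of the cooperative query path (the nameserver
+# address below is a hypothetical placeholder from TEST-NET):
+#
+# q = dns.message.make_query('example.com', dns.rdatatype.A)
+# r = udp(q, '192.0.2.53', timeout=2.0) # yields to the hub while waiting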
+
+
+def tcp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53,
+ af=None, source=None, source_port=0,
+ one_rr_per_rrset=False, ignore_trailing=False, sock=None):
+ """coro friendly replacement for dns.query.tcp
+ Return the response obtained after sending a query via TCP.
+
+ @param q: the query
+ @type q: dns.message.Message object
+ @param where: where to send the message
+ @type where: string containing an IPv4 or IPv6 address
+ @param timeout: The number of seconds to wait before the query times out.
+ If None, wait forever.
+ @type timeout: float
+ @param port: The port to which to send the message. The default is 53.
+ @type port: int
+ @param af: the address family to use. The default is None, which
+ causes the address family to be inferred from the form of where.
+ If the inference attempt fails, AF_INET is used.
+ @type af: int
+ @rtype: dns.message.Message object
+ @param source: source address. The default is the IPv4 wildcard address.
+ @type source: string
+ @param source_port: The port from which to send the message.
+ The default is 0.
+ @type source_port: int
+ @param one_rr_per_rrset: If True, put each RR into its own
+ RRset.
+ @type one_rr_per_rrset: bool
+ @param ignore_trailing: If True, ignore trailing
+ junk at end of the received message.
+ @type ignore_trailing: bool
+ @param sock: the socket to use for the
+ query. If None, the default, a socket is created. Note that
+ if a socket is provided, it must be a nonblocking stream socket,
+ and the source and source_port are ignored.
+ @type sock: socket.socket | None"""
+
+ wire = q.to_wire()
+ if af is None:
+ try:
+ af = dns.inet.af_for_address(where)
+ except:
+ af = dns.inet.AF_INET
+ if af == dns.inet.AF_INET:
+ destination = (where, port)
+ if source is not None:
+ source = (source, source_port)
+ elif af == dns.inet.AF_INET6:
+ destination = (where, port, 0, 0)
+ if source is not None:
+ source = (source, source_port, 0, 0)
+ if sock:
+ s = sock
+ else:
+ s = socket.socket(af, socket.SOCK_STREAM)
+ s.settimeout(timeout)
+ try:
+ expiration = compute_expiration(dns.query, timeout)
+ if source is not None:
+ s.bind(source)
+ while True:
+ try:
+ s.connect(destination)
+ break
+ except socket.timeout:
+ # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
+ if expiration - time.time() <= 0.0:
+ raise dns.exception.Timeout
+ eventlet.sleep(0.01)
+ continue
+
+ l = len(wire)
+ # copying the wire into tcpmsg is inefficient, but lets us
+ # avoid writev() or doing a short write that would get pushed
+ # onto the net
+ tcpmsg = struct.pack("!H", l) + wire
+ _net_write(s, tcpmsg, expiration)
+ ldata = _net_read(s, 2, expiration)
+ (l,) = struct.unpack("!H", ldata)
+ wire = bytes(_net_read(s, l, expiration))
+ finally:
+ s.close()
+ r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing)
+ if not q.is_response(r):
+ raise dns.query.BadResponse()
+ return r
+
+
+def reset():
+ resolver.clear()
+
+
+# Install our coro-friendly replacements for the tcp and udp query methods.
+dns.query.tcp = tcp
+dns.query.udp = udp
diff --git a/.venv/Lib/site-packages/eventlet/support/greenlets.py b/.venv/Lib/site-packages/eventlet/support/greenlets.py
new file mode 100644
index 0000000..b939328
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/support/greenlets.py
@@ -0,0 +1,4 @@
+import greenlet
+getcurrent = greenlet.greenlet.getcurrent
+GreenletExit = greenlet.greenlet.GreenletExit
+greenlet = greenlet.greenlet
diff --git a/.venv/Lib/site-packages/eventlet/support/psycopg2_patcher.py b/.venv/Lib/site-packages/eventlet/support/psycopg2_patcher.py
new file mode 100644
index 0000000..2f4034a
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/support/psycopg2_patcher.py
@@ -0,0 +1,55 @@
+"""A wait callback to allow psycopg2 cooperation with eventlet.
+
+Use `make_psycopg_green()` to enable eventlet support in Psycopg.
+"""
+
+# Copyright (C) 2010 Daniele Varrazzo
+# and licensed under the MIT license:
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+import psycopg2
+from psycopg2 import extensions
+
+import eventlet.hubs
+
+
+def make_psycopg_green():
+ """Configure Psycopg to be used with eventlet in non-blocking way."""
+ if not hasattr(extensions, 'set_wait_callback'):
+ raise ImportError(
+ "support for coroutines not available in this Psycopg version (%s)"
+ % psycopg2.__version__)
+
+ extensions.set_wait_callback(eventlet_wait_callback)
+
+
+def eventlet_wait_callback(conn, timeout=-1):
+ """A wait callback useful to allow eventlet to work with Psycopg."""
+ while 1:
+ state = conn.poll()
+ if state == extensions.POLL_OK:
+ break
+ elif state == extensions.POLL_READ:
+ eventlet.hubs.trampoline(conn.fileno(), read=True)
+ elif state == extensions.POLL_WRITE:
+ eventlet.hubs.trampoline(conn.fileno(), write=True)
+ else:
+ raise psycopg2.OperationalError(
+ "Bad result from poll: %r" % state)
diff --git a/.venv/Lib/site-packages/eventlet/support/pylib.py b/.venv/Lib/site-packages/eventlet/support/pylib.py
new file mode 100644
index 0000000..fdb0682
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/support/pylib.py
@@ -0,0 +1,12 @@
+from py.magic import greenlet
+
+import sys
+import types
+
+
+def emulate():
+ module = types.ModuleType('greenlet')
+ sys.modules['greenlet'] = module
+ module.greenlet = greenlet
+ module.getcurrent = greenlet.getcurrent
+ module.GreenletExit = greenlet.GreenletExit
diff --git a/.venv/Lib/site-packages/eventlet/support/stacklesspypys.py b/.venv/Lib/site-packages/eventlet/support/stacklesspypys.py
new file mode 100644
index 0000000..fe3638a
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/support/stacklesspypys.py
@@ -0,0 +1,12 @@
+from stackless import greenlet
+
+import sys
+import types
+
+
+def emulate():
+ module = types.ModuleType('greenlet')
+ sys.modules['greenlet'] = module
+ module.greenlet = greenlet
+ module.getcurrent = greenlet.getcurrent
+ module.GreenletExit = greenlet.GreenletExit
diff --git a/.venv/Lib/site-packages/eventlet/support/stacklesss.py b/.venv/Lib/site-packages/eventlet/support/stacklesss.py
new file mode 100644
index 0000000..9b3951e
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/support/stacklesss.py
@@ -0,0 +1,84 @@
+"""
+Support for using stackless python. Broken and riddled with print statements
+at the moment. Please fix it!
+"""
+
+import sys
+import types
+
+import stackless
+
+caller = None
+coro_args = {}
+tasklet_to_greenlet = {}
+
+
+def getcurrent():
+ return tasklet_to_greenlet[stackless.getcurrent()]
+
+
+class FirstSwitch:
+ def __init__(self, gr):
+ self.gr = gr
+
+ def __call__(self, *args, **kw):
+ # print("first call", args, kw)
+ gr = self.gr
+ del gr.switch
+ run, gr.run = gr.run, None
+ t = stackless.tasklet(run)
+ gr.t = t
+ tasklet_to_greenlet[t] = gr
+ t.setup(*args, **kw)
+ t.run()
+
+
+class greenlet:
+ def __init__(self, run=None, parent=None):
+ self.dead = False
+ if parent is None:
+ parent = getcurrent()
+
+ self.parent = parent
+ if run is not None:
+ self.run = run
+
+ self.switch = FirstSwitch(self)
+
+ def switch(self, *args):
+ # print("switch", args)
+ global caller
+ caller = stackless.getcurrent()
+ coro_args[self] = args
+ self.t.insert()
+ stackless.schedule()
+ if caller is not self.t:
+ caller.remove()
+ rval = coro_args[self]
+ return rval
+
+ def run(self):
+ pass
+
+ def __bool__(self):
+ return self.run is None and not self.dead
+
+
+class GreenletExit(Exception):
+ pass
+
+
+def emulate():
+ module = types.ModuleType('greenlet')
+ sys.modules['greenlet'] = module
+ module.greenlet = greenlet
+ module.getcurrent = getcurrent
+ module.GreenletExit = GreenletExit
+
+ caller = stackless.getcurrent()
+ tasklet_to_greenlet[caller] = None
+ main_coro = greenlet()
+ tasklet_to_greenlet[caller] = main_coro
+ main_coro.t = caller
+ del main_coro.switch # It's already running
+ coro_args[main_coro] = None
diff --git a/.venv/Lib/site-packages/eventlet/timeout.py b/.venv/Lib/site-packages/eventlet/timeout.py
new file mode 100644
index 0000000..4ab893e
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/timeout.py
@@ -0,0 +1,184 @@
+# Copyright (c) 2009-2010 Denis Bilenko, denis.bilenko at gmail com
+# Copyright (c) 2010 Eventlet Contributors (see AUTHORS)
+# and licensed under the MIT license:
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+import functools
+import inspect
+
+import eventlet
+from eventlet.support import greenlets as greenlet
+from eventlet.hubs import get_hub
+
+__all__ = ['Timeout', 'with_timeout', 'wrap_is_timeout', 'is_timeout']
+
+_MISSING = object()
+
+# deriving from BaseException so that "except Exception as e" doesn't catch
+# Timeout exceptions.
+
+
+class Timeout(BaseException):
+ """Raises *exception* in the current greenthread after *timeout* seconds.
+
+ When *exception* is omitted or ``None``, the :class:`Timeout` instance
+ itself is raised. If *seconds* is None, the timer is not scheduled, and is
+ only useful if you're planning to raise it directly.
+
+ Timeout objects are context managers, and so can be used in with statements.
+ When used in a with statement, if *exception* is ``False``, the timeout is
+ still raised, but the context manager suppresses it, so the code outside the
+ with-block won't see it.
+ """
+
+ def __init__(self, seconds=None, exception=None):
+ self.seconds = seconds
+ self.exception = exception
+ self.timer = None
+ self.start()
+
+ def start(self):
+ """Schedule the timeout. This is called on construction, so
+ it should not be called explicitly, unless the timer has been
+ canceled."""
+ assert not self.pending, \
+ '%r is already started; to restart it, cancel it first' % self
+ if self.seconds is None: # "fake" timeout (never expires)
+ self.timer = None
+ elif self.exception is None or isinstance(self.exception, bool): # timeout that raises self
+ self.timer = get_hub().schedule_call_global(
+ self.seconds, greenlet.getcurrent().throw, self)
+ else: # regular timeout with user-provided exception
+ self.timer = get_hub().schedule_call_global(
+ self.seconds, greenlet.getcurrent().throw, self.exception)
+ return self
+
+ @property
+ def pending(self):
+ """True if the timeout is scheduled to be raised."""
+ if self.timer is not None:
+ return self.timer.pending
+ else:
+ return False
+
+ def cancel(self):
+ """If the timeout is pending, cancel it. If not using
+ Timeouts in ``with`` statements, always call cancel() in a
+ ``finally`` after the block of code that is getting timed out.
+ If not canceled, the timeout will be raised later on, in some
+ unexpected section of the application."""
+ if self.timer is not None:
+ self.timer.cancel()
+ self.timer = None
+
+ def __repr__(self):
+ classname = self.__class__.__name__
+ if self.pending:
+ pending = ' pending'
+ else:
+ pending = ''
+ if self.exception is None:
+ exception = ''
+ else:
+ exception = ' exception=%r' % self.exception
+ return '<%s at %s seconds=%s%s%s>' % (
+ classname, hex(id(self)), self.seconds, exception, pending)
+
+ def __str__(self):
+ """
+ >>> raise Timeout # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ Timeout
+ """
+ if self.seconds is None:
+ return ''
+ if self.seconds == 1:
+ suffix = ''
+ else:
+ suffix = 's'
+ if self.exception is None or self.exception is True:
+ return '%s second%s' % (self.seconds, suffix)
+ elif self.exception is False:
+ return '%s second%s (silent)' % (self.seconds, suffix)
+ else:
+ return '%s second%s (%s)' % (self.seconds, suffix, self.exception)
+
+ def __enter__(self):
+ if self.timer is None:
+ self.start()
+ return self
+
+ def __exit__(self, typ, value, tb):
+ self.cancel()
+ if value is self and self.exception is False:
+ return True
+
+ @property
+ def is_timeout(self):
+ return True
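+
+# An illustrative sketch of the suppressed form (assumes a running hub):
+#
+# with Timeout(0.5, False): # exception=False: raised, then swallowed
+# eventlet.sleep(2) # interrupted after ~0.5 seconds
+# # execution resumes here; code outside the with-block never sees it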
+
+
+def with_timeout(seconds, function, *args, **kwds):
+ """Wrap a call to some (yielding) function with a timeout; if the called
+ function fails to return before the timeout, cancel it and return a flag
+ value.
+ """
+ timeout_value = kwds.pop("timeout_value", _MISSING)
+ timeout = Timeout(seconds)
+ try:
+ try:
+ return function(*args, **kwds)
+ except Timeout as ex:
+ if ex is timeout and timeout_value is not _MISSING:
+ return timeout_value
+ raise
+ finally:
+ timeout.cancel()
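+
+# Illustrative sketch of the flag-value behavior (assumes a running hub):
+#
+# data = with_timeout(0.5, eventlet.sleep, 2, timeout_value='too slow')
+# assert data == 'too slow' # sleep(2) cannot finish within 0.5 seconds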
+
+
+def wrap_is_timeout(base):
+ '''Adds `.is_timeout=True` attribute to objects returned by `base()`.
+
+ When `base` is a class, the attribute is added as a read-only property,
+ and `base` itself is returned. Otherwise, a wrapper function is returned
+ that sets the attribute on the result of each `base()` call.
+
+ Wrappers make best effort to be transparent.
+ '''
+ if inspect.isclass(base):
+ base.is_timeout = property(lambda _: True)
+ return base
+
+ @functools.wraps(base)
+ def fun(*args, **kwargs):
+ ex = base(*args, **kwargs)
+ ex.is_timeout = True
+ return ex
+ return fun
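+
+# A minimal sketch (DeadlineExceeded is a hypothetical exception type):
+#
+# @wrap_is_timeout
+# class DeadlineExceeded(Exception):
+# pass
+#
+# assert is_timeout(DeadlineExceeded())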
+
+
+if isinstance(__builtins__, dict): # seen when running tests on py310, but HOW??
+ _timeout_err = __builtins__.get('TimeoutError', Timeout)
+else:
+ _timeout_err = getattr(__builtins__, 'TimeoutError', Timeout)
+
+
+def is_timeout(obj):
+ return bool(getattr(obj, 'is_timeout', False)) or isinstance(obj, _timeout_err)
diff --git a/.venv/Lib/site-packages/eventlet/tpool.py b/.venv/Lib/site-packages/eventlet/tpool.py
new file mode 100644
index 0000000..1a3f412
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/tpool.py
@@ -0,0 +1,336 @@
+# Copyright (c) 2007-2009, Linden Research, Inc.
+# Copyright (c) 2007, IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import atexit
+try:
+ import _imp as imp
+except ImportError:
+ import imp
+import os
+import sys
+import traceback
+
+import eventlet
+from eventlet import event, greenio, greenthread, patcher, timeout
+
+__all__ = ['execute', 'Proxy', 'killall', 'set_num_threads']
+
+
+EXC_CLASSES = (Exception, timeout.Timeout)
+SYS_EXCS = (GeneratorExit, KeyboardInterrupt, SystemExit)
+
+QUIET = True
+
+socket = patcher.original('socket')
+threading = patcher.original('threading')
+Queue_module = patcher.original('queue')
+
+Empty = Queue_module.Empty
+Queue = Queue_module.Queue
+
+_bytetosend = b' '
+_coro = None
+_nthreads = int(os.environ.get('EVENTLET_THREADPOOL_SIZE', 20))
+_reqq = _rspq = None
+_rsock = _wsock = None
+_setup_already = False
+_threads = []
+
+
+def tpool_trampoline():
+ global _rspq
+ while True:
+ try:
+ _c = _rsock.recv(1)
+ assert _c
+ # FIXME: this is probably redundant since using sockets instead of pipe now
+ except ValueError:
+ break # will be raised when pipe is closed
+ while not _rspq.empty():
+ try:
+ (e, rv) = _rspq.get(block=False)
+ e.send(rv)
+ e = rv = None
+ except Empty:
+ pass
+
+
+def tworker():
+ global _rspq
+ while True:
+ try:
+ msg = _reqq.get()
+ except AttributeError:
+ return # can't get anything off of a dud queue
+ if msg is None:
+ return
+ (e, meth, args, kwargs) = msg
+ rv = None
+ try:
+ rv = meth(*args, **kwargs)
+ except SYS_EXCS:
+ raise
+ except EXC_CLASSES:
+ rv = sys.exc_info()
+ traceback.clear_frames(rv[1].__traceback__)
+ # test_leakage_from_tracebacks verifies that the use of
+ # exc_info does not lead to memory leaks
+ _rspq.put((e, rv))
+ msg = meth = args = kwargs = e = rv = None
+ _wsock.sendall(_bytetosend)
+
+
+def execute(meth, *args, **kwargs):
+ """
+ Execute *meth* in a Python thread, blocking the current coroutine/
+ greenthread until the method completes.
+
+ The primary use case for this is to wrap an object or module that is not
+ amenable to monkeypatching or any of the other tricks that Eventlet uses
+ to achieve cooperative yielding. With tpool, you can force such objects to
+ cooperate with green threads by sticking them in native threads, at the cost
+ of some overhead.
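+
+ A hedged sketch: push a CPU-heavy call into a native thread so the hub
+ keeps servicing green threads meanwhile::
+
+     import hashlib
+     digest = execute(hashlib.sha256, b'x' * 10**7).hexdigest()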
+ """
+ setup()
+ # if already in tpool, don't recurse into the tpool
+ # also, call functions directly if we're inside an import lock, because
+ # if meth does any importing (sadly common), it will hang
+ my_thread = threading.current_thread()
+ if my_thread in _threads or imp.lock_held() or _nthreads == 0:
+ return meth(*args, **kwargs)
+
+ e = event.Event()
+ _reqq.put((e, meth, args, kwargs))
+
+ rv = e.wait()
+ if isinstance(rv, tuple) \
+ and len(rv) == 3 \
+ and isinstance(rv[1], EXC_CLASSES):
+ (c, e, tb) = rv
+ if not QUIET:
+ traceback.print_exception(c, e, tb)
+ traceback.print_stack()
+ raise e.with_traceback(tb)
+ return rv
+
+
+def proxy_call(autowrap, f, *args, **kwargs):
+ """
+ Calls the function *f* and returns its value. If the type of the return
+ value is in the *autowrap* collection, then it is wrapped in a :class:`Proxy`
+ object before being returned.
+
+ Normally *f* will be called in the threadpool with :func:`execute`; if the
+ keyword argument "nonblocking" is set to ``True``, it will simply be
+ executed directly. This is useful if you have an object which has methods
+ that don't need to be called in a separate thread, but which return objects
+ that should be Proxy wrapped.
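+
+ A hedged sketch (``conn`` and ``CursorType`` are illustrative names)::
+
+     cur = proxy_call((CursorType,), conn.cursor)
+     # cur is Proxy-wrapped if conn.cursor() returned a CursorType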
+ """
+ if kwargs.pop('nonblocking', False):
+ rv = f(*args, **kwargs)
+ else:
+ rv = execute(f, *args, **kwargs)
+ if isinstance(rv, autowrap):
+ return Proxy(rv, autowrap)
+ else:
+ return rv
+
+
+class Proxy:
+ """
+ A simple proxy-wrapper of any object that comes with a
+ methods-only interface, in order to forward every method
+ invocation onto a thread in the native-thread pool. A key
+ restriction is that the object's methods should not switch
+ greenlets or use Eventlet primitives, since they are in a
+ different thread from the main hub, and therefore might behave
+ unexpectedly. This is for running native-threaded code
+ only.
+
+ It's common to want to have some of the attributes or return
+ values also wrapped in Proxy objects (for example, database
+ connection objects produce cursor objects which also should be
+ wrapped in Proxy objects to remain nonblocking). *autowrap*, if
+ supplied, is a collection of types; if an attribute or return
+ value matches one of those types (via isinstance), it will be
+ wrapped in a Proxy. *autowrap_names* is a collection
+ of strings, which represent the names of attributes that should be
+ wrapped in Proxy objects when accessed.
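+
+ A hedged example using the stdlib ``sqlite3`` module (note that the
+ connection must allow cross-thread use here)::
+
+     import sqlite3
+     raw = sqlite3.connect(':memory:', check_same_thread=False)
+     conn = Proxy(raw, autowrap_names=('cursor',))
+     cur = conn.cursor()  # the returned cursor is itself Proxy-wrapped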
+ """
+
+ def __init__(self, obj, autowrap=(), autowrap_names=()):
+ self._obj = obj
+ self._autowrap = autowrap
+ self._autowrap_names = autowrap_names
+
+ def __getattr__(self, attr_name):
+ f = getattr(self._obj, attr_name)
+ if not hasattr(f, '__call__'):
+ if isinstance(f, self._autowrap) or attr_name in self._autowrap_names:
+ return Proxy(f, self._autowrap)
+ return f
+
+ def doit(*args, **kwargs):
+ result = proxy_call(self._autowrap, f, *args, **kwargs)
+ if attr_name in self._autowrap_names and not isinstance(result, Proxy):
+ return Proxy(result)
+ return result
+ return doit
+
+ # the following are a bunch of methods that the Python interpreter
+ # doesn't use getattr to retrieve and therefore have to be defined
+ # explicitly
+ def __getitem__(self, key):
+ return proxy_call(self._autowrap, self._obj.__getitem__, key)
+
+ def __setitem__(self, key, value):
+ return proxy_call(self._autowrap, self._obj.__setitem__, key, value)
+
+ def __deepcopy__(self, memo=None):
+ return proxy_call(self._autowrap, self._obj.__deepcopy__, memo)
+
+ def __copy__(self, memo=None):
+ return proxy_call(self._autowrap, self._obj.__copy__, memo)
+
+ def __call__(self, *a, **kw):
+ if '__call__' in self._autowrap_names:
+ return Proxy(proxy_call(self._autowrap, self._obj, *a, **kw))
+ else:
+ return proxy_call(self._autowrap, self._obj, *a, **kw)
+
+ def __enter__(self):
+ return proxy_call(self._autowrap, self._obj.__enter__)
+
+ def __exit__(self, *exc):
+ return proxy_call(self._autowrap, self._obj.__exit__, *exc)
+
+ # these don't go through a proxy call, because they're likely to
+ # be called often, and are unlikely to be implemented on the
+ # wrapped object in such a way that they would block
+ def __eq__(self, rhs):
+ return self._obj == rhs
+
+ def __hash__(self):
+ return self._obj.__hash__()
+
+ def __repr__(self):
+ return self._obj.__repr__()
+
+ def __str__(self):
+ return self._obj.__str__()
+
+ def __len__(self):
+ return len(self._obj)
+
+ def __nonzero__(self):
+ return bool(self._obj)
+ # Python3
+ __bool__ = __nonzero__
+
+ def __iter__(self):
+ it = iter(self._obj)
+ if it == self._obj:
+ return self
+ else:
+ return Proxy(it)
+
+ def next(self):
+ return proxy_call(self._autowrap, next, self._obj)
+ # Python3
+ __next__ = next
+
+
+def setup():
+ global _rsock, _wsock, _coro, _setup_already, _rspq, _reqq
+ if _setup_already:
+ return
+ else:
+ _setup_already = True
+
+ assert _nthreads >= 0, "Can't specify negative number of threads"
+ if _nthreads == 0:
+ import warnings
+ warnings.warn("Zero threads in tpool. All tpool.execute calls will\
+ execute in main thread. Check the value of the environment \
+ variable EVENTLET_THREADPOOL_SIZE.", RuntimeWarning)
+ _reqq = Queue(maxsize=-1)
+ _rspq = Queue(maxsize=-1)
+
+ # connected socket pair
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.bind(('127.0.0.1', 0))
+ sock.listen(1)
+ csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ csock.connect(sock.getsockname())
+ csock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
+ _wsock, _addr = sock.accept()
+ _wsock.settimeout(None)
+ _wsock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
+ sock.close()
+ _rsock = greenio.GreenSocket(csock)
+ _rsock.settimeout(None)
+
+ for i in range(_nthreads):
+ t = threading.Thread(target=tworker,
+ name="tpool_thread_%s" % i)
+ t.daemon = True
+ t.start()
+ _threads.append(t)
+
+ _coro = greenthread.spawn_n(tpool_trampoline)
+ # This yield fixes subtle error with GreenSocket.__del__
+ eventlet.sleep(0)
+
+
+# Avoid ResourceWarning unclosed socket on Python3.2+
+@atexit.register
+def killall():
+ global _setup_already, _rspq, _rsock, _wsock
+ if not _setup_already:
+ return
+
+ # This yield fixes freeze in some scenarios
+ eventlet.sleep(0)
+
+ for thr in _threads:
+ _reqq.put(None)
+ for thr in _threads:
+ thr.join()
+ del _threads[:]
+
+ # return any remaining results
+ while (_rspq is not None) and not _rspq.empty():
+ try:
+ (e, rv) = _rspq.get(block=False)
+ e.send(rv)
+ e = rv = None
+ except Empty:
+ pass
+
+ if _coro is not None:
+ greenthread.kill(_coro)
+ if _rsock is not None:
+ _rsock.close()
+ _rsock = None
+ if _wsock is not None:
+ _wsock.close()
+ _wsock = None
+ _rspq = None
+ _setup_already = False
+
+
+def set_num_threads(nthreads):
+ global _nthreads
+ _nthreads = nthreads
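+
+
+# Hedged usage note: the value set here is read when setup() first creates
+# the pool, so call set_num_threads() before the first execute()/Proxy use:
+#
+#     tpool.set_num_threads(40)
+#     tpool.execute(some_blocking_function)  # hypothetical callable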
diff --git a/.venv/Lib/site-packages/eventlet/websocket.py b/.venv/Lib/site-packages/eventlet/websocket.py
new file mode 100644
index 0000000..3d50f70
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/websocket.py
@@ -0,0 +1,868 @@
+import base64
+import codecs
+import collections
+import errno
+from random import Random
+from socket import error as SocketError
+import string
+import struct
+import sys
+import time
+
+import zlib
+
+try:
+ from hashlib import md5, sha1
+except ImportError: # pragma NO COVER
+ from md5 import md5
+ from sha import sha as sha1
+
+from eventlet import semaphore
+from eventlet import wsgi
+from eventlet.green import socket
+from eventlet.support import get_errno
+
+# Python 2's utf8 decoding is more lenient than we'd like
+# In order to pass autobahn's testsuite we need stricter validation
+# if available...
+for _mod in ('wsaccel.utf8validator', 'autobahn.utf8validator'):
+ # autobahn has its own Python-based validator. In newer versions
+ # it prefers to use wsaccel, a Cython-based implementation, if available.
+ # wsaccel may also be installed without autobahn, or with an earlier version.
+ try:
+ utf8validator = __import__(_mod, {}, {}, [''])
+ except ImportError:
+ utf8validator = None
+ else:
+ break
+
+ACCEPTABLE_CLIENT_ERRORS = {errno.ECONNRESET, errno.EPIPE, errno.ESHUTDOWN}
+DEFAULT_MAX_FRAME_LENGTH = 8 << 20
+
+__all__ = ["WebSocketWSGI", "WebSocket"]
+PROTOCOL_GUID = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
+VALID_CLOSE_STATUS = set(
+ list(range(1000, 1004)) +
+ list(range(1007, 1012)) +
+ # 3000-3999: reserved for use by libraries, frameworks,
+ # and applications
+ list(range(3000, 4000)) +
+ # 4000-4999: reserved for private use and thus can't
+ # be registered
+ list(range(4000, 5000))
+)
+
+
+class BadRequest(Exception):
+ def __init__(self, status='400 Bad Request', body=None, headers=None):
+ super().__init__()
+ self.status = status
+ self.body = body
+ self.headers = headers
+
+
+class WebSocketWSGI:
+ """Wraps a websocket handler function in a WSGI application.
+
+ Use it like this::
+
+ @websocket.WebSocketWSGI
+ def my_handler(ws):
+ from_browser = ws.wait()
+ ws.send("from server")
+
+ The single argument to the function will be an instance of
+ :class:`WebSocket`. To close the socket, simply return from the
+ function. Note that the server will log the websocket request at
+ the time of closure.
+
+ An optional argument max_frame_length can be given, which will set the
+ maximum incoming *uncompressed* payload length of a frame. By default, this
+ is set to 8 MiB. Note that excessive values here might create a DoS attack
+ vector.
+ """
+
+ def __init__(self, handler, max_frame_length=DEFAULT_MAX_FRAME_LENGTH):
+ self.handler = handler
+ self.protocol_version = None
+ self.support_legacy_versions = True
+ self.supported_protocols = []
+ self.origin_checker = None
+ self.max_frame_length = max_frame_length
+
+ @classmethod
+ def configured(cls,
+ handler=None,
+ supported_protocols=None,
+ origin_checker=None,
+ support_legacy_versions=False):
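+ """Alternate constructor: configures the wrapper via keyword options and
+ returns a decorator when *handler* is omitted.
+
+ A hedged usage sketch::
+
+     @WebSocketWSGI.configured(supported_protocols=['chat'])
+     def handle(ws):
+         ws.send(ws.wait())
+ """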
+ def decorator(handler):
+ inst = cls(handler)
+ inst.support_legacy_versions = support_legacy_versions
+ inst.origin_checker = origin_checker
+ if supported_protocols:
+ inst.supported_protocols = supported_protocols
+ return inst
+ if handler is None:
+ return decorator
+ return decorator(handler)
+
+ def __call__(self, environ, start_response):
+ http_connection_parts = [
+ part.strip()
+ for part in environ.get('HTTP_CONNECTION', '').lower().split(',')]
+ if not ('upgrade' in http_connection_parts and
+ environ.get('HTTP_UPGRADE', '').lower() == 'websocket'):
+ # need to check a few more things here for true compliance
+ start_response('400 Bad Request', [('Connection', 'close')])
+ return []
+
+ try:
+ if 'HTTP_SEC_WEBSOCKET_VERSION' in environ:
+ ws = self._handle_hybi_request(environ)
+ elif self.support_legacy_versions:
+ ws = self._handle_legacy_request(environ)
+ else:
+ raise BadRequest()
+ except BadRequest as e:
+ status = e.status
+ body = e.body or b''
+ headers = e.headers or []
+ start_response(status,
+ [('Connection', 'close'), ] + headers)
+ return [body]
+
+ # We're ready to switch protocols; if running under Eventlet
+ # (this is not always the case) then flag the connection as
+ # idle to play well with a graceful stop
+ if 'eventlet.set_idle' in environ:
+ environ['eventlet.set_idle']()
+ try:
+ self.handler(ws)
+ except OSError as e:
+ if get_errno(e) not in ACCEPTABLE_CLIENT_ERRORS:
+ raise
+ # Make sure we send the closing frame
+ ws._send_closing_frame(True)
+ # use this undocumented feature of eventlet.wsgi to ensure that it
+ # doesn't barf on the fact that we didn't call start_response
+ wsgi.WSGI_LOCAL.already_handled = True
+ return []
+
+ def _handle_legacy_request(self, environ):
+ if 'eventlet.input' in environ:
+ sock = environ['eventlet.input'].get_socket()
+ elif 'gunicorn.socket' in environ:
+ sock = environ['gunicorn.socket']
+ else:
+ raise Exception('No eventlet.input or gunicorn.socket present in environ.')
+
+ if 'HTTP_SEC_WEBSOCKET_KEY1' in environ:
+ self.protocol_version = 76
+ if 'HTTP_SEC_WEBSOCKET_KEY2' not in environ:
+ raise BadRequest()
+ else:
+ self.protocol_version = 75
+
+ if self.protocol_version == 76:
+ key1 = self._extract_number(environ['HTTP_SEC_WEBSOCKET_KEY1'])
+ key2 = self._extract_number(environ['HTTP_SEC_WEBSOCKET_KEY2'])
+ # There's no content-length header in the request, but it has 8
+ # bytes of data.
+ environ['wsgi.input'].content_length = 8
+ key3 = environ['wsgi.input'].read(8)
+ key = struct.pack(">II", key1, key2) + key3
+ response = md5(key).digest()
+
+ # Start building the response
+ scheme = 'ws'
+ if environ.get('wsgi.url_scheme') == 'https':
+ scheme = 'wss'
+ location = '%s://%s%s%s' % (
+ scheme,
+ environ.get('HTTP_HOST'),
+ environ.get('SCRIPT_NAME'),
+ environ.get('PATH_INFO')
+ )
+ qs = environ.get('QUERY_STRING')
+ if qs is not None:
+ location += '?' + qs
+ if self.protocol_version == 75:
+ handshake_reply = (
+ b"HTTP/1.1 101 Web Socket Protocol Handshake\r\n"
+ b"Upgrade: WebSocket\r\n"
+ b"Connection: Upgrade\r\n"
+ b"WebSocket-Origin: " + environ.get('HTTP_ORIGIN').encode() + b"\r\n"
+ b"WebSocket-Location: " + location.encode() + b"\r\n\r\n"
+ )
+ elif self.protocol_version == 76:
+ handshake_reply = (
+ b"HTTP/1.1 101 WebSocket Protocol Handshake\r\n"
+ b"Upgrade: WebSocket\r\n"
+ b"Connection: Upgrade\r\n"
+ b"Sec-WebSocket-Origin: " + environ.get('HTTP_ORIGIN').encode() + b"\r\n"
+ b"Sec-WebSocket-Protocol: " +
+ environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', 'default').encode() + b"\r\n"
+ b"Sec-WebSocket-Location: " + location.encode() + b"\r\n"
+ b"\r\n" + response
+ )
+ else: # pragma NO COVER
+ raise ValueError("Unknown WebSocket protocol version.")
+ sock.sendall(handshake_reply)
+ return WebSocket(sock, environ, self.protocol_version)
+
+ def _parse_extension_header(self, header):
+ if header is None:
+ return None
+ res = {}
+ for ext in header.split(","):
+ parts = ext.split(";")
+ config = {}
+ for part in parts[1:]:
+ key_val = part.split("=")
+ if len(key_val) == 1:
+ config[key_val[0].strip().lower()] = True
+ else:
+ config[key_val[0].strip().lower()] = key_val[1].strip().strip('"').lower()
+ res.setdefault(parts[0].strip().lower(), []).append(config)
+ return res
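+
+ # For example, the (illustrative) header value
+ #     'permessage-deflate; client_max_window_bits, foo'
+ # parses to
+ #     {'permessage-deflate': [{'client_max_window_bits': True}],
+ #      'foo': [{}]}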
+
+ def _negotiate_permessage_deflate(self, extensions):
+ if not extensions:
+ return None
+ deflate = extensions.get("permessage-deflate")
+ if deflate is None:
+ return None
+ for config in deflate:
+ # We'll evaluate each config in the client's preferred order and pick
+ # the first that we can support.
+ want_config = {
+ # These are bool options, we can support both
+ "server_no_context_takeover": config.get("server_no_context_takeover", False),
+ "client_no_context_takeover": config.get("client_no_context_takeover", False)
+ }
+ # These are either bool OR int options. True means the client can accept a value
+ # for the option, a number means the client wants that specific value.
+ max_wbits = min(zlib.MAX_WBITS, 15)
+ mwb = config.get("server_max_window_bits")
+ if mwb is not None:
+ if mwb is True:
+ want_config["server_max_window_bits"] = max_wbits
+ else:
+ want_config["server_max_window_bits"] = \
+ int(config.get("server_max_window_bits", max_wbits))
+ if not (8 <= want_config["server_max_window_bits"] <= 15):
+ continue
+ mwb = config.get("client_max_window_bits")
+ if mwb is not None:
+ if mwb is True:
+ want_config["client_max_window_bits"] = max_wbits
+ else:
+ want_config["client_max_window_bits"] = \
+ int(config.get("client_max_window_bits", max_wbits))
+ if not (8 <= want_config["client_max_window_bits"] <= 15):
+ continue
+ return want_config
+ return None
+
+ def _format_extension_header(self, parsed_extensions):
+ if not parsed_extensions:
+ return None
+ parts = []
+ for name, config in parsed_extensions.items():
+ ext_parts = [name.encode()]
+ for key, value in config.items():
+ if value is False:
+ pass
+ elif value is True:
+ ext_parts.append(key.encode())
+ else:
+ ext_parts.append(("%s=%s" % (key, str(value))).encode())
+ parts.append(b"; ".join(ext_parts))
+ return b", ".join(parts)
+
+ def _handle_hybi_request(self, environ):
+ if 'eventlet.input' in environ:
+ sock = environ['eventlet.input'].get_socket()
+ elif 'gunicorn.socket' in environ:
+ sock = environ['gunicorn.socket']
+ else:
+ raise Exception('No eventlet.input or gunicorn.socket present in environ.')
+
+ hybi_version = environ['HTTP_SEC_WEBSOCKET_VERSION']
+ if hybi_version not in ('8', '13', ):
+ raise BadRequest(status='426 Upgrade Required',
+ headers=[('Sec-WebSocket-Version', '8, 13')])
+ self.protocol_version = int(hybi_version)
+ if 'HTTP_SEC_WEBSOCKET_KEY' not in environ:
+ # That's bad.
+ raise BadRequest()
+ origin = environ.get(
+ 'HTTP_ORIGIN',
+ (environ.get('HTTP_SEC_WEBSOCKET_ORIGIN', '')
+ if self.protocol_version <= 8 else ''))
+ if self.origin_checker is not None:
+ if not self.origin_checker(environ.get('HTTP_HOST'), origin):
+ raise BadRequest(status='403 Forbidden')
+ protocols = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', None)
+ negotiated_protocol = None
+ if protocols:
+ for p in (i.strip() for i in protocols.split(',')):
+ if p in self.supported_protocols:
+ negotiated_protocol = p
+ break
+
+ key = environ['HTTP_SEC_WEBSOCKET_KEY']
+ response = base64.b64encode(sha1(key.encode() + PROTOCOL_GUID).digest())
+ handshake_reply = [b"HTTP/1.1 101 Switching Protocols",
+ b"Upgrade: websocket",
+ b"Connection: Upgrade",
+ b"Sec-WebSocket-Accept: " + response]
+ if negotiated_protocol:
+ handshake_reply.append(b"Sec-WebSocket-Protocol: " + negotiated_protocol.encode())
+
+ parsed_extensions = {}
+ extensions = self._parse_extension_header(environ.get("HTTP_SEC_WEBSOCKET_EXTENSIONS"))
+
+ deflate = self._negotiate_permessage_deflate(extensions)
+ if deflate is not None:
+ parsed_extensions["permessage-deflate"] = deflate
+
+ formatted_ext = self._format_extension_header(parsed_extensions)
+ if formatted_ext is not None:
+ handshake_reply.append(b"Sec-WebSocket-Extensions: " + formatted_ext)
+
+ sock.sendall(b'\r\n'.join(handshake_reply) + b'\r\n\r\n')
+ return RFC6455WebSocket(sock, environ, self.protocol_version,
+ protocol=negotiated_protocol,
+ extensions=parsed_extensions,
+ max_frame_length=self.max_frame_length)
+
+ def _extract_number(self, value):
+ """
+ Utility function which, given a string like 'g98sd 5[]221@1', will
+ return 9852211. Used to parse the Sec-WebSocket-Key headers.
+ """
+ out = ""
+ spaces = 0
+ for char in value:
+ if char in string.digits:
+ out += char
+ elif char == " ":
+ spaces += 1
+ return int(out) // spaces
+
+
+class WebSocket:
+ """A websocket object that handles the details of
+ serialization/deserialization to the socket.
+
+ The primary way to interact with a :class:`WebSocket` object is to
+ call :meth:`send` and :meth:`wait` in order to pass messages back
+ and forth with the browser. Also available are the following
+ properties:
+
+ path
+ The path value of the request. This is the same as the WSGI PATH_INFO variable,
+ but more convenient.
+ protocol
+ The value of the Websocket-Protocol header.
+ origin
+ The value of the 'Origin' header.
+ environ
+ The full WSGI environment for this request.
+
+ """
+
+ def __init__(self, sock, environ, version=76):
+ """
+ :param socket: The eventlet socket
+ :type socket: :class:`eventlet.greenio.GreenSocket`
+ :param environ: The wsgi environment
+ :param version: The WebSocket spec version to follow (default is 76)
+ """
+ self.log = environ.get('wsgi.errors', sys.stderr)
+ self.log_context = 'server={shost}/{spath} client={caddr}:{cport}'.format(
+ shost=environ.get('HTTP_HOST'),
+ spath=environ.get('SCRIPT_NAME', '') + environ.get('PATH_INFO', ''),
+ caddr=environ.get('REMOTE_ADDR'), cport=environ.get('REMOTE_PORT'),
+ )
+ self.socket = sock
+ self.origin = environ.get('HTTP_ORIGIN')
+ self.protocol = environ.get('HTTP_WEBSOCKET_PROTOCOL')
+ self.path = environ.get('PATH_INFO')
+ self.environ = environ
+ self.version = version
+ self.websocket_closed = False
+ self._buf = b""
+ self._msgs = collections.deque()
+ self._sendlock = semaphore.Semaphore()
+
+ def _pack_message(self, message):
+ """Pack the message inside ``00`` and ``FF``
+
+ As per the dataframing section (5.3) for the websocket spec
+ """
+ if isinstance(message, str):
+ message = message.encode('utf-8')
+ elif not isinstance(message, bytes):
+ message = str(message).encode()
+ packed = b"\x00" + message + b"\xFF"
+ return packed
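+
+ # For example: _pack_message("hi") == b"\x00hi\xFF"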
+
+ def _parse_messages(self):
+ """ Parses for messages in the buffer *buf*. It is assumed that
+ the buffer contains the start character for a message, but that it
+ may contain only part of the rest of the message.
+
+ Returns an array of messages, and the buffer remainder that
+ didn't contain any full messages."""
+ msgs = []
+ end_idx = 0
+ buf = self._buf
+ while buf:
+ frame_type = buf[0]
+ if frame_type == 0:
+ # Normal message.
+ end_idx = buf.find(b"\xFF")
+ if end_idx == -1: # pragma NO COVER
+ break
+ msgs.append(buf[1:end_idx].decode('utf-8', 'replace'))
+ buf = buf[end_idx + 1:]
+ elif frame_type == 255:
+ # Closing handshake.
+ assert buf[1] == 0, "Unexpected closing handshake: %r" % buf
+ self.websocket_closed = True
+ break
+ else:
+ raise ValueError("Don't understand how to parse this type of message: %r" % buf)
+ self._buf = buf
+ return msgs
+
+ def send(self, message):
+ """Send a message to the browser.
+
+ *message* should be convertible to a string; unicode objects should be
+ encodable as utf-8. Raises socket.error with errno of 32
+ (broken pipe) if the socket has already been closed by the client."""
+ packed = self._pack_message(message)
+ # if two greenthreads are trying to send at the same time
+ # on the same socket, sendlock prevents interleaving and corruption
+ self._sendlock.acquire()
+ try:
+ self.socket.sendall(packed)
+ finally:
+ self._sendlock.release()
+
+ def wait(self):
+ """Waits for and deserializes messages.
+
+ Returns a single message; the oldest not yet processed. If the client
+ has already closed the connection, returns None. This is different
+ from normal socket behavior because the empty string is a valid
+ websocket message."""
+ while not self._msgs:
+ # Websocket might be closed already.
+ if self.websocket_closed:
+ return None
+ # no parsed messages, must mean buf needs more data
+ delta = self.socket.recv(8096)
+ if delta == b'':
+ return None
+ self._buf += delta
+ msgs = self._parse_messages()
+ self._msgs.extend(msgs)
+ return self._msgs.popleft()
+
+ def _send_closing_frame(self, ignore_send_errors=False):
+ """Sends the closing frame to the client, if required."""
+ if self.version == 76 and not self.websocket_closed:
+ try:
+ self.socket.sendall(b"\xff\x00")
+ except OSError:
+ # Sometimes, like when the remote side cuts off the connection,
+ # we don't care about this.
+ if not ignore_send_errors: # pragma NO COVER
+ raise
+ self.websocket_closed = True
+
+ def close(self):
+ """Forcibly close the websocket; generally it is preferable to
+ return from the handler method."""
+ try:
+ self._send_closing_frame(True)
+ self.socket.shutdown(True)
+ except OSError as e:
+ if e.errno != errno.ENOTCONN:
+ self.log.write('{ctx} socket shutdown error: {e}'.format(ctx=self.log_context, e=e))
+ finally:
+ self.socket.close()
+
+
+class ConnectionClosedError(Exception):
+ pass
+
+
+class FailedConnectionError(Exception):
+ def __init__(self, status, message):
+ super().__init__(status, message)
+ self.message = message
+ self.status = status
+
+
+class ProtocolError(ValueError):
+ pass
+
+
+class RFC6455WebSocket(WebSocket):
+ def __init__(self, sock, environ, version=13, protocol=None, client=False, extensions=None,
+ max_frame_length=DEFAULT_MAX_FRAME_LENGTH):
+ super().__init__(sock, environ, version)
+ self.iterator = self._iter_frames()
+ self.client = client
+ self.protocol = protocol
+ self.extensions = extensions or {}
+
+ self._deflate_enc = None
+ self._deflate_dec = None
+ self.max_frame_length = max_frame_length
+ self._remote_close_data = None
+
+ class UTF8Decoder:
+ def __init__(self):
+ if utf8validator:
+ self.validator = utf8validator.Utf8Validator()
+ else:
+ self.validator = None
+ decoderclass = codecs.getincrementaldecoder('utf8')
+ self.decoder = decoderclass()
+
+ def reset(self):
+ if self.validator:
+ self.validator.reset()
+ self.decoder.reset()
+
+ def decode(self, data, final=False):
+ if self.validator:
+ valid, eocp, c_i, t_i = self.validator.validate(data)
+ if not valid:
+ raise ValueError('Data is not valid unicode')
+ return self.decoder.decode(data, final)
+
+ def _get_permessage_deflate_enc(self):
+ options = self.extensions.get("permessage-deflate")
+ if options is None:
+ return None
+
+ def _make():
+ return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED,
+ -options.get("client_max_window_bits" if self.client
+ else "server_max_window_bits",
+ zlib.MAX_WBITS))
+
+ if options.get("client_no_context_takeover" if self.client
+ else "server_no_context_takeover"):
+ # This option means we have to make a new one every time
+ return _make()
+ else:
+ if self._deflate_enc is None:
+ self._deflate_enc = _make()
+ return self._deflate_enc
+
+ def _get_permessage_deflate_dec(self, rsv1):
+ options = self.extensions.get("permessage-deflate")
+ if options is None or not rsv1:
+ return None
+
+ def _make():
+ return zlib.decompressobj(-options.get("server_max_window_bits" if self.client
+ else "client_max_window_bits",
+ zlib.MAX_WBITS))
+
+ if options.get("server_no_context_takeover" if self.client
+ else "client_no_context_takeover"):
+ # This option means we have to make a new one every time
+ return _make()
+ else:
+ if self._deflate_dec is None:
+ self._deflate_dec = _make()
+ return self._deflate_dec
+
+ def _get_bytes(self, numbytes):
+ data = b''
+ while len(data) < numbytes:
+ d = self.socket.recv(numbytes - len(data))
+ if not d:
+ raise ConnectionClosedError()
+ data = data + d
+ return data
+
+ class Message:
+ def __init__(self, opcode, max_frame_length, decoder=None, decompressor=None):
+ self.decoder = decoder
+ self.data = []
+ self.finished = False
+ self.opcode = opcode
+ self.decompressor = decompressor
+ self.max_frame_length = max_frame_length
+
+ def push(self, data, final=False):
+ self.finished = final
+ self.data.append(data)
+
+ def getvalue(self):
+ data = b"".join(self.data)
+ if not self.opcode & 8 and self.decompressor:
+ data = self.decompressor.decompress(data + b"\x00\x00\xff\xff", self.max_frame_length)
+ if self.decompressor.unconsumed_tail:
+ raise FailedConnectionError(
+ 1009,
+ "Incoming compressed frame exceeds length limit of {} bytes.".format(self.max_frame_length))
+
+ if self.decoder:
+ data = self.decoder.decode(data, self.finished)
+ return data
+
+ @staticmethod
+ def _apply_mask(data, mask, length=None, offset=0):
+ if length is None:
+ length = len(data)
+ cnt = range(length)
+ return b''.join(bytes((data[i] ^ mask[(offset + i) % 4],)) for i in cnt)
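+
+ # Masking is XOR with a repeating 4-byte key, so it is its own inverse:
+ #     masked = RFC6455WebSocket._apply_mask(b'abcd', [1, 2, 3, 4])
+ #     RFC6455WebSocket._apply_mask(masked, [1, 2, 3, 4]) == b'abcd'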
+
+ def _handle_control_frame(self, opcode, data):
+ if opcode == 8: # connection close
+ self._remote_close_data = data
+ if not data:
+ status = 1000
+ elif len(data) > 1:
+ status = struct.unpack_from('!H', data)[0]
+ if not status or status not in VALID_CLOSE_STATUS:
+ raise FailedConnectionError(
+ 1002,
+ "Unexpected close status code.")
+ try:
+ data = self.UTF8Decoder().decode(data[2:], True)
+ except (UnicodeDecodeError, ValueError):
+ raise FailedConnectionError(
+ 1002,
+ "Close message data should be valid UTF-8.")
+ else:
+ status = 1002
+ self.close(close_data=(status, ''))
+ raise ConnectionClosedError()
+ elif opcode == 9: # ping
+ self.send(data, control_code=0xA)
+ elif opcode == 0xA: # pong
+ pass
+ else:
+ raise FailedConnectionError(
+ 1002, "Unknown control frame received.")
+
+ def _iter_frames(self):
+ fragmented_message = None
+ try:
+ while True:
+ message = self._recv_frame(message=fragmented_message)
+ if message.opcode & 8:
+ self._handle_control_frame(
+ message.opcode, message.getvalue())
+ continue
+ if fragmented_message and message is not fragmented_message:
+ raise RuntimeError('Unexpected message change.')
+ fragmented_message = message
+ if message.finished:
+ data = fragmented_message.getvalue()
+ fragmented_message = None
+ yield data
+ except FailedConnectionError:
+ exc_typ, exc_val, exc_tb = sys.exc_info()
+ self.close(close_data=(exc_val.status, exc_val.message))
+ except ConnectionClosedError:
+ return
+ except Exception:
+ self.close(close_data=(1011, 'Internal Server Error'))
+ raise
+
+ def _recv_frame(self, message=None):
+ recv = self._get_bytes
+
+ # Unpacking the frame described in Section 5.2 of RFC6455
+ # (https://tools.ietf.org/html/rfc6455#section-5.2)
+ header = recv(2)
+ a, b = struct.unpack('!BB', header)
+ finished = a >> 7 == 1
+ rsv123 = a >> 4 & 7
+ rsv1 = rsv123 & 4
+ if rsv123:
+ if rsv1 and "permessage-deflate" not in self.extensions:
+ # must be zero - unless the message is compressed, in which case RSV1 is set
+ raise FailedConnectionError(
+ 1002,
+ "RSV1, RSV2, RSV3: MUST be 0 unless an extension is"
+ " negotiated that defines meanings for non-zero values.")
+ opcode = a & 15
+ if opcode not in (0, 1, 2, 8, 9, 0xA):
+ raise FailedConnectionError(1002, "Unknown opcode received.")
+ masked = b & 128 == 128
+ if not masked and not self.client:
+ raise FailedConnectionError(1002, "A client MUST mask all frames"
+ " that it sends to the server")
+ length = b & 127
+ if opcode & 8:
+ if not finished:
+ raise FailedConnectionError(1002, "Control frames must not"
+ " be fragmented.")
+ if length > 125:
+ raise FailedConnectionError(
+ 1002,
+ "All control frames MUST have a payload length of 125"
+ " bytes or less")
+ elif opcode and message:
+ raise FailedConnectionError(
+ 1002,
+ "Received a non-continuation opcode within"
+ " fragmented message.")
+ elif not opcode and not message:
+ raise FailedConnectionError(
+ 1002,
+ "Received continuation opcode with no previous"
+ " fragments received.")
+ if length == 126:
+ length = struct.unpack('!H', recv(2))[0]
+ elif length == 127:
+ length = struct.unpack('!Q', recv(8))[0]
+
+ if length > self.max_frame_length:
+ raise FailedConnectionError(1009, "Incoming frame of {} bytes is above length limit of {} bytes.".format(
+ length, self.max_frame_length))
+ if masked:
+ mask = struct.unpack('!BBBB', recv(4))
+ received = 0
+ if not message or opcode & 8:
+ decoder = self.UTF8Decoder() if opcode == 1 else None
+ decompressor = self._get_permessage_deflate_dec(rsv1)
+ message = self.Message(opcode, self.max_frame_length, decoder=decoder, decompressor=decompressor)
+ if not length:
+ message.push(b'', final=finished)
+ else:
+ while received < length:
+ d = self.socket.recv(length - received)
+ if not d:
+ raise ConnectionClosedError()
+ dlen = len(d)
+ if masked:
+ d = self._apply_mask(d, mask, length=dlen, offset=received)
+ received = received + dlen
+ try:
+ message.push(d, final=finished)
+ except (UnicodeDecodeError, ValueError):
+ raise FailedConnectionError(
+ 1007, "Text data must be valid utf-8")
+ return message
+
+ def _pack_message(self, message, masked=False,
+ continuation=False, final=True, control_code=None):
+ is_text = False
+ if isinstance(message, str):
+ message = message.encode('utf-8')
+ is_text = True
+
+ compress_bit = 0
+ compressor = self._get_permessage_deflate_enc()
+ # Control frames are identified by opcodes where the most significant
+ # bit of the opcode is 1. Currently defined opcodes for control frames
+ # include 0x8 (Close), 0x9 (Ping), and 0xA (Pong). Opcodes 0xB-0xF are
+ # reserved for further control frames yet to be defined.
+ # https://datatracker.ietf.org/doc/html/rfc6455#section-5.5
+ is_control_frame = (control_code or 0) & 8
+ # An endpoint MUST NOT set the "Per-Message Compressed" bit of control
+ # frames and non-first fragments of a data message. An endpoint
+ # receiving such a frame MUST _Fail the WebSocket Connection_.
+ # https://datatracker.ietf.org/doc/html/rfc7692#section-6.1
+ if message and compressor and not is_control_frame:
+ message = compressor.compress(message)
+ message += compressor.flush(zlib.Z_SYNC_FLUSH)
+ assert message[-4:] == b"\x00\x00\xff\xff"
+ message = message[:-4]
+ compress_bit = 1 << 6
+
+ length = len(message)
+ if not length:
+ # no point masking empty data
+ masked = False
+ if control_code:
+ if control_code not in (8, 9, 0xA):
+ raise ProtocolError('Unknown control opcode.')
+ if continuation or not final:
+ raise ProtocolError('Control frame cannot be a fragment.')
+ if length > 125:
+ raise ProtocolError('Control frame data too large (>125).')
+ header = struct.pack('!B', control_code | 1 << 7)
+ else:
+ opcode = 0 if continuation else ((1 if is_text else 2) | compress_bit)
+ header = struct.pack('!B', opcode | (1 << 7 if final else 0))
+ lengthdata = 1 << 7 if masked else 0
+ if length > 65535:
+ lengthdata = struct.pack('!BQ', lengthdata | 127, length)
+ elif length > 125:
+ lengthdata = struct.pack('!BH', lengthdata | 126, length)
+ else:
+ lengthdata = struct.pack('!B', lengthdata | length)
+ if masked:
+ # NOTE: RFC6455 states:
+ # A server MUST NOT mask any frames that it sends to the client
+ rand = Random(time.time())
+ mask = [rand.getrandbits(8) for _ in range(4)]
+ message = RFC6455WebSocket._apply_mask(message, mask, length)
+ maskdata = struct.pack('!BBBB', *mask)
+ else:
+ maskdata = b''
+
+ return b''.join((header, lengthdata, maskdata, message))
+
+ def wait(self):
+ for i in self.iterator:
+ return i
+
+ def _send(self, frame):
+ self._sendlock.acquire()
+ try:
+ self.socket.sendall(frame)
+ finally:
+ self._sendlock.release()
+
+ def send(self, message, **kw):
+ kw['masked'] = self.client
+ payload = self._pack_message(message, **kw)
+ self._send(payload)
+
+ def _send_closing_frame(self, ignore_send_errors=False, close_data=None):
+ if self.version in (8, 13) and not self.websocket_closed:
+ if close_data is not None:
+ status, msg = close_data
+ if isinstance(msg, str):
+ msg = msg.encode('utf-8')
+ data = struct.pack('!H', status) + msg
+ else:
+ data = ''
+ try:
+ self.send(data, control_code=8)
+ except OSError:
+ # Sometimes, like when the remote side cuts off the connection,
+ # we don't care about this.
+ if not ignore_send_errors: # pragma NO COVER
+ raise
+ self.websocket_closed = True
+
+ def close(self, close_data=None):
+ """Forcibly close the websocket; generally it is preferable to
+ return from the handler method."""
+ try:
+ self._send_closing_frame(close_data=close_data, ignore_send_errors=True)
+ self.socket.shutdown(socket.SHUT_WR)
+ except OSError as e:
+ if e.errno != errno.ENOTCONN:
+ self.log.write('{ctx} socket shutdown error: {e}'.format(ctx=self.log_context, e=e))
+ finally:
+ self.socket.close()
diff --git a/.venv/Lib/site-packages/eventlet/wsgi.py b/.venv/Lib/site-packages/eventlet/wsgi.py
new file mode 100644
index 0000000..3b530b1
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/wsgi.py
@@ -0,0 +1,1066 @@
+import errno
+import os
+import sys
+import time
+import traceback
+import types
+import urllib.parse
+import warnings
+
+import eventlet
+from eventlet import greenio
+from eventlet import support
+from eventlet.corolocal import local
+from eventlet.green import BaseHTTPServer
+from eventlet.green import socket
+
+
+DEFAULT_MAX_SIMULTANEOUS_REQUESTS = 1024
+DEFAULT_MAX_HTTP_VERSION = 'HTTP/1.1'
+MAX_REQUEST_LINE = 8192
+MAX_HEADER_LINE = 8192
+MAX_TOTAL_HEADER_SIZE = 65536
+MINIMUM_CHUNK_SIZE = 4096
+# %(client_port)s is also available
+DEFAULT_LOG_FORMAT = ('%(client_ip)s - - [%(date_time)s] "%(request_line)s"'
+ ' %(status_code)s %(body_length)s %(wall_seconds).6f')
+RESPONSE_414 = b'''HTTP/1.0 414 Request URI Too Long\r\n\
+Connection: close\r\n\
+Content-Length: 0\r\n\r\n'''
+is_accepting = True
+
+STATE_IDLE = 'idle'
+STATE_REQUEST = 'request'
+STATE_CLOSE = 'close'
+
+__all__ = ['server', 'format_date_time']
+
+# Weekday and month names for HTTP date/time formatting; always English!
+_weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
+_monthname = [None, # Dummy so we can use 1-based month numbers
+ "Jan", "Feb", "Mar", "Apr", "May", "Jun",
+ "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
+
+
+def format_date_time(timestamp):
+ """Formats a unix timestamp into an HTTP standard string."""
+ year, month, day, hh, mm, ss, wd, _y, _z = time.gmtime(timestamp)
+ return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
+ _weekdayname[wd], day, _monthname[month], year, hh, mm, ss
+ )
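+
+
+# For example, the Unix epoch formats as
+#     format_date_time(0) == 'Thu, 01 Jan 1970 00:00:00 GMT'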
+
+
+def addr_to_host_port(addr):
+ host = 'unix'
+ port = ''
+ if isinstance(addr, tuple):
+ host = addr[0]
+ port = addr[1]
+ return (host, port)
+
+
+# Collections of error codes to compare against. Not all attributes are set
+# on the errno module on all platforms, so some are literals :(
+BAD_SOCK = {errno.EBADF, 10053}
+BROKEN_SOCK = {errno.EPIPE, errno.ECONNRESET, errno.ESHUTDOWN}
+
+
+class ChunkReadError(ValueError):
+ pass
+
+
+WSGI_LOCAL = local()
+
+
+class Input:
+
+ def __init__(self,
+ rfile,
+ content_length,
+ sock,
+ wfile=None,
+ wfile_line=None,
+ chunked_input=False):
+
+ self.rfile = rfile
+ self._sock = sock
+ if content_length is not None:
+ content_length = int(content_length)
+ self.content_length = content_length
+
+ self.wfile = wfile
+ self.wfile_line = wfile_line
+
+ self.position = 0
+ self.chunked_input = chunked_input
+ self.chunk_length = -1
+
+ # (optional) headers to send with a "100 Continue" response. Set by
+ # calling set_hundred_continue_respose_headers() on env['wsgi.input']
+ self.hundred_continue_headers = None
+ self.is_hundred_continue_response_sent = False
+
+ # handle_one_response should give us a ref to the response state so we
+ # know whether we can still send the 100 Continue; until then, though,
+ # we're flying blind
+ self.headers_sent = None
+
+ def send_hundred_continue_response(self):
+ if self.headers_sent:
+ # Too late; the application has already started sending data back
+ # to the client
+ # TODO: maybe log a warning if self.hundred_continue_headers
+ # is not None?
+ return
+
+ towrite = []
+
+ # 100 Continue status line
+ towrite.append(self.wfile_line)
+
+ # Optional headers
+ if self.hundred_continue_headers is not None:
+ # 100 Continue headers
+ for header in self.hundred_continue_headers:
+ towrite.append(('%s: %s\r\n' % header).encode())
+
+ # Blank line
+ towrite.append(b'\r\n')
+
+ self.wfile.writelines(towrite)
+ self.wfile.flush()
+
+ # Reinitialize chunk_length (expect more data)
+ self.chunk_length = -1
+
+ @property
+ def should_send_hundred_continue(self):
+ return self.wfile is not None and not self.is_hundred_continue_response_sent
+
+ def _do_read(self, reader, length=None):
+ if self.should_send_hundred_continue:
+ # 100 Continue response
+ self.send_hundred_continue_response()
+ self.is_hundred_continue_response_sent = True
+ if (self.content_length is not None) and (
+ length is None or length > self.content_length - self.position):
+ length = self.content_length - self.position
+ if not length:
+ return b''
+ try:
+ read = reader(length)
+ except greenio.SSL.ZeroReturnError:
+ read = b''
+ self.position += len(read)
+ return read
+
+ def _chunked_read(self, rfile, length=None, use_readline=False):
+ if self.should_send_hundred_continue:
+ # 100 Continue response
+ self.send_hundred_continue_response()
+ self.is_hundred_continue_response_sent = True
+ try:
+ if length == 0:
+ return b""
+
+ if length and length < 0:
+ length = None
+
+ if use_readline:
+ reader = self.rfile.readline
+ else:
+ reader = self.rfile.read
+
+ response = []
+ while self.chunk_length != 0:
+ maxreadlen = self.chunk_length - self.position
+ if length is not None and length < maxreadlen:
+ maxreadlen = length
+
+ if maxreadlen > 0:
+ data = reader(maxreadlen)
+ if not data:
+ self.chunk_length = 0
+ raise OSError("unexpected end of file while parsing chunked data")
+
+ datalen = len(data)
+ response.append(data)
+
+ self.position += datalen
+ if self.chunk_length == self.position:
+ rfile.readline()
+
+ if length is not None:
+ length -= datalen
+ if length == 0:
+ break
+ if use_readline and data[-1:] == b"\n":
+ break
+ else:
+ try:
+ self.chunk_length = int(rfile.readline().split(b";", 1)[0], 16)
+ except ValueError as err:
+ raise ChunkReadError(err)
+ self.position = 0
+ if self.chunk_length == 0:
+ rfile.readline()
+ except greenio.SSL.ZeroReturnError:
+ pass
+ return b''.join(response)
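+
+ # Hedged illustration of the framing handled above: a chunked body such
+ # as b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n" decodes to b"Wikipedia".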
+
+ def read(self, length=None):
+ if self.chunked_input:
+ return self._chunked_read(self.rfile, length)
+ return self._do_read(self.rfile.read, length)
+
+ def readline(self, size=None):
+ if self.chunked_input:
+ return self._chunked_read(self.rfile, size, True)
+ else:
+ return self._do_read(self.rfile.readline, size)
+
+ def readlines(self, hint=None):
+ if self.chunked_input:
+ lines = []
+ for line in iter(self.readline, b''):
+ lines.append(line)
+ if hint and hint > 0:
+ hint -= len(line)
+ if hint <= 0:
+ break
+ return lines
+ else:
+ return self._do_read(self.rfile.readlines, hint)
+
+ def __iter__(self):
+ return iter(self.read, b'')
+
+ def get_socket(self):
+ return self._sock
+
+ def set_hundred_continue_response_headers(self, headers,
+ capitalize_response_headers=True):
+ # Response headers capitalization (default)
+ # CONTent-TYpe: TExt/PlaiN -> Content-Type: TExt/PlaiN
+ # Per the HTTP RFC, header names are case-insensitive.
+ # Please fix your client to ignore header case if possible.
+ if capitalize_response_headers:
+ headers = [
+ ('-'.join([x.capitalize() for x in key.split('-')]), value)
+ for key, value in headers]
+ self.hundred_continue_headers = headers
+
+ def discard(self, buffer_size=16 << 10):
+ while self.read(buffer_size):
+ pass
+
+
+class HeaderLineTooLong(Exception):
+ pass
+
+
+class HeadersTooLarge(Exception):
+ pass
+
+
+def get_logger(log, debug):
+ if callable(getattr(log, 'info', None)) \
+ and callable(getattr(log, 'debug', None)):
+ return log
+ else:
+ return LoggerFileWrapper(log or sys.stderr, debug)
+
+
+class LoggerNull:
+ def __init__(self):
+ pass
+
+ def error(self, msg, *args, **kwargs):
+ pass
+
+ def info(self, msg, *args, **kwargs):
+ pass
+
+ def debug(self, msg, *args, **kwargs):
+ pass
+
+ def write(self, msg, *args):
+ pass
+
+
+class LoggerFileWrapper(LoggerNull):
+ def __init__(self, log, debug):
+ self.log = log
+ self._debug = debug
+
+ def error(self, msg, *args, **kwargs):
+ self.write(msg, *args)
+
+ def info(self, msg, *args, **kwargs):
+ self.write(msg, *args)
+
+ def debug(self, msg, *args, **kwargs):
+ if self._debug:
+ self.write(msg, *args)
+
+ def write(self, msg, *args):
+ msg = msg + '\n'
+ if args:
+ msg = msg % args
+ self.log.write(msg)
+
+
+class FileObjectForHeaders:
+
+ def __init__(self, fp):
+ self.fp = fp
+ self.total_header_size = 0
+
+ def readline(self, size=-1):
+ sz = size
+ if size < 0:
+ sz = MAX_HEADER_LINE
+ rv = self.fp.readline(sz)
+ if len(rv) >= MAX_HEADER_LINE:
+ raise HeaderLineTooLong()
+ self.total_header_size += len(rv)
+ if self.total_header_size > MAX_TOTAL_HEADER_SIZE:
+ raise HeadersTooLarge()
+ return rv
+
+
+class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
+ """This class is used to handle the HTTP requests that arrive
+ at the server.
+
+ The handler will parse the request and the headers, then call a method
+ specific to the request type.
+
+ :param conn_state: The given connection status.
+ :param server: The server accessible by the request handler.
+ """
+ protocol_version = 'HTTP/1.1'
+ minimum_chunk_size = MINIMUM_CHUNK_SIZE
+ capitalize_response_headers = True
+ reject_bad_requests = True
+
+ # https://github.com/eventlet/eventlet/issues/295
+ # Stdlib default is 0 (unbuffered), but then `wfile.writelines()` loses data
+ # so before going back to unbuffered, remove any usage of `writelines`.
+ wbufsize = 16 << 10
+
+ def __init__(self, conn_state, server):
+ self.request = conn_state[1]
+ self.client_address = conn_state[0]
+ self.conn_state = conn_state
+ self.server = server
+ self.setup()
+ try:
+ self.handle()
+ finally:
+ self.finish()
+
+ def setup(self):
+ # overriding SocketServer.setup to correctly handle SSL.Connection objects
+ conn = self.connection = self.request
+
+ # TCP_QUICKACK is a better alternative to disabling Nagle's algorithm
+ # https://news.ycombinator.com/item?id=10607422
+ if getattr(socket, 'TCP_QUICKACK', None):
+ try:
+ conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, True)
+ except OSError:
+ pass
+
+ try:
+ self.rfile = conn.makefile('rb', self.rbufsize)
+ self.wfile = conn.makefile('wb', self.wbufsize)
+ except (AttributeError, NotImplementedError):
+ if hasattr(conn, 'send') and hasattr(conn, 'recv'):
+ # it's an SSL.Connection
+ self.rfile = socket._fileobject(conn, "rb", self.rbufsize)
+ self.wfile = socket._fileobject(conn, "wb", self.wbufsize)
+ else:
+ # it's a SSLObject, or a martian
+ raise NotImplementedError(
+ '''eventlet.wsgi doesn't support sockets of type {}'''.format(type(conn)))
+
+ def handle(self):
+ self.close_connection = True
+
+ while True:
+ self.handle_one_request()
+ if self.conn_state[2] == STATE_CLOSE:
+ self.close_connection = 1
+ else:
+ self.conn_state[2] = STATE_IDLE
+ if self.close_connection:
+ break
+
+ def _read_request_line(self):
+ if self.rfile.closed:
+ self.close_connection = 1
+ return ''
+
+ try:
+ sock = self.connection
+ if self.server.keepalive and not isinstance(self.server.keepalive, bool):
+ sock.settimeout(self.server.keepalive)
+ line = self.rfile.readline(self.server.url_length_limit)
+ sock.settimeout(self.server.socket_timeout)
+ return line
+ except greenio.SSL.ZeroReturnError:
+ pass
+ except OSError as e:
+ last_errno = support.get_errno(e)
+ if last_errno in BROKEN_SOCK:
+ self.server.log.debug('({}) connection reset by peer {!r}'.format(
+ self.server.pid,
+ self.client_address))
+ elif last_errno not in BAD_SOCK:
+ raise
+ return ''
+
+ def handle_one_request(self):
+ if self.server.max_http_version:
+ self.protocol_version = self.server.max_http_version
+
+ self.raw_requestline = self._read_request_line()
+ self.conn_state[2] = STATE_REQUEST
+ if not self.raw_requestline:
+ self.close_connection = 1
+ return
+ if len(self.raw_requestline) >= self.server.url_length_limit:
+ self.wfile.write(RESPONSE_414)
+ self.close_connection = 1
+ return
+
+ orig_rfile = self.rfile
+ try:
+ self.rfile = FileObjectForHeaders(self.rfile)
+ if not self.parse_request():
+ return
+ except HeaderLineTooLong:
+ self.wfile.write(
+ b"HTTP/1.0 400 Header Line Too Long\r\n"
+ b"Connection: close\r\nContent-length: 0\r\n\r\n")
+ self.close_connection = 1
+ return
+ except HeadersTooLarge:
+ self.wfile.write(
+ b"HTTP/1.0 400 Headers Too Large\r\n"
+ b"Connection: close\r\nContent-length: 0\r\n\r\n")
+ self.close_connection = 1
+ return
+ finally:
+ self.rfile = orig_rfile
+
+ content_length = self.headers.get('content-length')
+ transfer_encoding = self.headers.get('transfer-encoding')
+ if content_length is not None:
+ try:
+ if int(content_length) < 0:
+ raise ValueError
+ except ValueError:
+ # Negative, or not an int at all
+ self.wfile.write(
+ b"HTTP/1.0 400 Bad Request\r\n"
+ b"Connection: close\r\nContent-length: 0\r\n\r\n")
+ self.close_connection = 1
+ return
+
+ if transfer_encoding is not None:
+ if self.reject_bad_requests:
+ msg = b"Content-Length and Transfer-Encoding are not allowed together\n"
+ self.wfile.write(
+ b"HTTP/1.0 400 Bad Request\r\n"
+ b"Connection: close\r\n"
+ b"Content-Length: %d\r\n"
+ b"\r\n%s" % (len(msg), msg))
+ self.close_connection = 1
+ return
+
+ self.environ = self.get_environ()
+ self.application = self.server.app
+ try:
+ self.server.outstanding_requests += 1
+ try:
+ self.handle_one_response()
+ except OSError as e:
+ # Broken pipe, connection reset by peer
+ if support.get_errno(e) not in BROKEN_SOCK:
+ raise
+ finally:
+ self.server.outstanding_requests -= 1
+
+ def handle_one_response(self):
+ start = time.time()
+ headers_set = []
+ headers_sent = []
+ # Grab the request input now; app may try to replace it in the environ
+ request_input = self.environ['eventlet.input']
+ # Push the headers-sent state into the Input so it won't send a
+ # 100 Continue response if we've already started a response.
+ request_input.headers_sent = headers_sent
+
+ wfile = self.wfile
+ result = None
+ use_chunked = [False]
+ length = [0]
+ status_code = [200]
+
+ def write(data):
+ towrite = []
+ if not headers_set:
+ raise AssertionError("write() before start_response()")
+ elif not headers_sent:
+ status, response_headers = headers_set
+ headers_sent.append(1)
+ header_list = [header[0].lower() for header in response_headers]
+ towrite.append(('%s %s\r\n' % (self.protocol_version, status)).encode())
+ for header in response_headers:
+ towrite.append(('%s: %s\r\n' % header).encode('latin-1'))
+
+ # send Date header?
+ if 'date' not in header_list:
+ towrite.append(('Date: %s\r\n' % (format_date_time(time.time()),)).encode())
+
+ client_conn = self.headers.get('Connection', '').lower()
+ send_keep_alive = False
+ if self.close_connection == 0 and \
+ self.server.keepalive and (client_conn == 'keep-alive' or
+ (self.request_version == 'HTTP/1.1' and
+ not client_conn == 'close')):
+ # only send keep-alives back to clients that sent them,
+ # it's redundant for 1.1 connections
+ send_keep_alive = (client_conn == 'keep-alive')
+ self.close_connection = 0
+ else:
+ self.close_connection = 1
+
+ if 'content-length' not in header_list:
+ if self.request_version == 'HTTP/1.1':
+ use_chunked[0] = True
+ towrite.append(b'Transfer-Encoding: chunked\r\n')
+ else:
+ # client is 1.0 (and sent no content-length) and therefore
+ # must read to EOF
+ self.close_connection = 1
+
+ if self.close_connection:
+ towrite.append(b'Connection: close\r\n')
+ elif send_keep_alive:
+ towrite.append(b'Connection: keep-alive\r\n')
+ # Spec says timeout must be an integer, but we allow sub-second
+ int_timeout = int(self.server.keepalive or 0)
+ if not isinstance(self.server.keepalive, bool) and int_timeout:
+ towrite.append(b'Keep-Alive: timeout=%d\r\n' % int_timeout)
+ towrite.append(b'\r\n')
+ # end of header writing
+
+ if use_chunked[0]:
+ # Write the chunked encoding
+ towrite.append(("%x" % (len(data),)).encode() + b"\r\n" + data + b"\r\n")
+ else:
+ towrite.append(data)
+ wfile.writelines(towrite)
+ wfile.flush()
+ length[0] = length[0] + sum(map(len, towrite))
+
+ def start_response(status, response_headers, exc_info=None):
+ status_code[0] = status.split()[0]
+ if exc_info:
+ try:
+ if headers_sent:
+ # Re-raise original exception if headers sent
+ raise exc_info[1].with_traceback(exc_info[2])
+ finally:
+ # Avoid dangling circular ref
+ exc_info = None
+
+ # Response headers capitalization
+ # CONTent-TYpe: TExt/PlaiN -> Content-Type: TExt/PlaiN
+ # Per the HTTP RFC, header names are case-insensitive.
+ # Please fix your client to ignore header case if possible.
+ if self.capitalize_response_headers:
+ def cap(x):
+ return x.encode('latin1').capitalize().decode('latin1')
+
+ response_headers = [
+ ('-'.join([cap(x) for x in key.split('-')]), value)
+ for key, value in response_headers]
+
+ headers_set[:] = [status, response_headers]
+ return write
+
+ try:
+ try:
+ WSGI_LOCAL.already_handled = False
+ result = self.application(self.environ, start_response)
+
+ # Set content-length if possible
+ if headers_set and not headers_sent and hasattr(result, '__len__'):
+ # We've got a complete final response
+ if 'Content-Length' not in [h for h, _v in headers_set[1]]:
+ headers_set[1].append(('Content-Length', str(sum(map(len, result)))))
+ if request_input.should_send_hundred_continue:
+ # We've got a complete final response, and never sent a 100 Continue.
+ # There's no chance we'll need to read the body as we stream out the
+ # response, so we can be nice and send a Connection: close header.
+ self.close_connection = 1
+
+ towrite = []
+ towrite_size = 0
+ just_written_size = 0
+ minimum_write_chunk_size = int(self.environ.get(
+ 'eventlet.minimum_write_chunk_size', self.minimum_chunk_size))
+ for data in result:
+ if len(data) == 0:
+ continue
+ if isinstance(data, str):
+ data = data.encode('ascii')
+
+ towrite.append(data)
+ towrite_size += len(data)
+ if towrite_size >= minimum_write_chunk_size:
+ write(b''.join(towrite))
+ towrite = []
+ just_written_size = towrite_size
+ towrite_size = 0
+ if WSGI_LOCAL.already_handled:
+ self.close_connection = 1
+ return
+ if towrite:
+ just_written_size = towrite_size
+ write(b''.join(towrite))
+ if not headers_sent or (use_chunked[0] and just_written_size):
+ write(b'')
+ except (Exception, eventlet.Timeout):
+ self.close_connection = 1
+ tb = traceback.format_exc()
+ self.server.log.info(tb)
+ if not headers_sent:
+ err_body = tb.encode() if self.server.debug else b''
+ start_response("500 Internal Server Error",
+ [('Content-type', 'text/plain'),
+ ('Content-length', len(err_body))])
+ write(err_body)
+ finally:
+ if hasattr(result, 'close'):
+ result.close()
+ if request_input.should_send_hundred_continue:
+ # We just sent the final response, no 100 Continue. Client may or
+ # may not have started to send a body, and if we keep the connection
+ # open we've seen clients either
+ # * send a body, then start a new request
+ # * skip the body and go straight to a new request
+ # Looks like the most broadly compatible option is to close the
+ # connection and let the client retry.
+ # https://curl.se/mail/lib-2004-08/0002.html
+ # Note that we likely *won't* send a Connection: close header at this point
+ self.close_connection = 1
+
+ if (request_input.chunked_input or
+ request_input.position < (request_input.content_length or 0)):
+ # Read and discard body if connection is going to be reused
+ if self.close_connection == 0:
+ try:
+ request_input.discard()
+ except ChunkReadError as e:
+ self.close_connection = 1
+ self.server.log.error((
+ 'chunked encoding error while discarding request body.'
+ + ' client={0} request="{1}" error="{2}"').format(
+ self.get_client_address()[0], self.requestline, e,
+ ))
+ except OSError as e:
+ self.close_connection = 1
+ self.server.log.error((
+ 'I/O error while discarding request body.'
+ + ' client={0} request="{1}" error="{2}"').format(
+ self.get_client_address()[0], self.requestline, e,
+ ))
+ finish = time.time()
+
+ for hook, args, kwargs in self.environ['eventlet.posthooks']:
+ hook(self.environ, *args, **kwargs)
+
+ if self.server.log_output:
+ client_host, client_port = self.get_client_address()
+
+ self.server.log.info(self.server.log_format % {
+ 'client_ip': client_host,
+ 'client_port': client_port,
+ 'date_time': self.log_date_time_string(),
+ 'request_line': self.requestline,
+ 'status_code': status_code[0],
+ 'body_length': length[0],
+ 'wall_seconds': finish - start,
+ })
+
+ def get_client_address(self):
+ host, port = addr_to_host_port(self.client_address)
+
+ if self.server.log_x_forwarded_for:
+ forward = self.headers.get('X-Forwarded-For', '').replace(' ', '')
+ if forward:
+ host = forward + ',' + host
+ return (host, port)
+
+ def get_environ(self):
+ env = self.server.get_environ()
+ env['REQUEST_METHOD'] = self.command
+ env['SCRIPT_NAME'] = ''
+
+ pq = self.path.split('?', 1)
+ env['RAW_PATH_INFO'] = pq[0]
+ env['PATH_INFO'] = urllib.parse.unquote(pq[0], encoding='latin1')
+ if len(pq) > 1:
+ env['QUERY_STRING'] = pq[1]
+
+ ct = self.headers.get('content-type')
+ if ct is None:
+ try:
+ ct = self.headers.type
+ except AttributeError:
+ ct = self.headers.get_content_type()
+ env['CONTENT_TYPE'] = ct
+
+ length = self.headers.get('content-length')
+ if length:
+ env['CONTENT_LENGTH'] = length
+ env['SERVER_PROTOCOL'] = 'HTTP/1.0'
+
+ sockname = self.request.getsockname()
+ server_addr = addr_to_host_port(sockname)
+ env['SERVER_NAME'] = server_addr[0]
+ env['SERVER_PORT'] = str(server_addr[1])
+ client_addr = addr_to_host_port(self.client_address)
+ env['REMOTE_ADDR'] = client_addr[0]
+ env['REMOTE_PORT'] = str(client_addr[1])
+ env['GATEWAY_INTERFACE'] = 'CGI/1.1'
+
+ try:
+ headers = self.headers.headers
+ except AttributeError:
+ headers = self.headers._headers
+ else:
+ headers = [h.split(':', 1) for h in headers]
+
+ env['headers_raw'] = headers_raw = tuple((k, v.strip(' \t\n\r')) for k, v in headers)
+ for k, v in headers_raw:
+ k = k.replace('-', '_').upper()
+ if k in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
+ # These do not get the HTTP_ prefix and were handled above
+ continue
+ envk = 'HTTP_' + k
+ if envk in env:
+ env[envk] += ',' + v
+ else:
+ env[envk] = v
+
+ if env.get('HTTP_EXPECT', '').lower() == '100-continue':
+ wfile = self.wfile
+ wfile_line = b'HTTP/1.1 100 Continue\r\n'
+ else:
+ wfile = None
+ wfile_line = None
+ chunked = env.get('HTTP_TRANSFER_ENCODING', '').lower() == 'chunked'
+ env['wsgi.input'] = env['eventlet.input'] = Input(
+ self.rfile, length, self.connection, wfile=wfile, wfile_line=wfile_line,
+ chunked_input=chunked)
+ env['eventlet.posthooks'] = []
+
+ # WebSocketWSGI needs a way to flag the connection as idle,
+ # since it may never fall out of handle_one_request
+ def set_idle():
+ self.conn_state[2] = STATE_IDLE
+ env['eventlet.set_idle'] = set_idle
+
+ return env
+
+ def finish(self):
+ try:
+ BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
+ except OSError as e:
+ # Broken pipe, connection reset by peer
+ if support.get_errno(e) not in BROKEN_SOCK:
+ raise
+ greenio.shutdown_safe(self.connection)
+ self.connection.close()
+
+ def handle_expect_100(self):
+ return True
+
+
+class Server(BaseHTTPServer.HTTPServer):
+
+ def __init__(self,
+ socket,
+ address,
+ app,
+ log=None,
+ environ=None,
+ max_http_version=None,
+ protocol=HttpProtocol,
+ minimum_chunk_size=None,
+ log_x_forwarded_for=True,
+ keepalive=True,
+ log_output=True,
+ log_format=DEFAULT_LOG_FORMAT,
+ url_length_limit=MAX_REQUEST_LINE,
+ debug=True,
+ socket_timeout=None,
+ capitalize_response_headers=True):
+
+ self.outstanding_requests = 0
+ self.socket = socket
+ self.address = address
+ self.log = LoggerNull()
+ if log_output:
+ self.log = get_logger(log, debug)
+ self.app = app
+ self.keepalive = keepalive
+ self.environ = environ
+ self.max_http_version = max_http_version
+ self.protocol = protocol
+ self.pid = os.getpid()
+ self.minimum_chunk_size = minimum_chunk_size
+ self.log_x_forwarded_for = log_x_forwarded_for
+ self.log_output = log_output
+ self.log_format = log_format
+ self.url_length_limit = url_length_limit
+ self.debug = debug
+ self.socket_timeout = socket_timeout
+ self.capitalize_response_headers = capitalize_response_headers
+
+ if not self.capitalize_response_headers:
+ warnings.warn("""capitalize_response_headers is disabled.
+ Please, make sure you know what you are doing.
+ HTTP headers names are case-insensitive per RFC standard.
+ Most likely, you need to fix HTTP parsing in your client software.""",
+ DeprecationWarning, stacklevel=3)
+
+ def get_environ(self):
+ d = {
+ 'wsgi.errors': sys.stderr,
+ 'wsgi.version': (1, 0),
+ 'wsgi.multithread': True,
+ 'wsgi.multiprocess': False,
+ 'wsgi.run_once': False,
+ 'wsgi.url_scheme': 'http',
+ }
+ # detect secure socket
+ if hasattr(self.socket, 'do_handshake'):
+ d['wsgi.url_scheme'] = 'https'
+ d['HTTPS'] = 'on'
+ if self.environ is not None:
+ d.update(self.environ)
+ return d
+
+ def process_request(self, conn_state):
+ # The actual request handling takes place in __init__, so we need to
+ # set minimum_chunk_size before __init__ executes, and we don't want to
+ # modify the class variable
+ proto = new(self.protocol)
+ if self.minimum_chunk_size is not None:
+ proto.minimum_chunk_size = self.minimum_chunk_size
+ proto.capitalize_response_headers = self.capitalize_response_headers
+ try:
+ proto.__init__(conn_state, self)
+ except socket.timeout:
+ # Expected exceptions are not exceptional
+ conn_state[1].close()
+ # similar to logging "accepted" in server()
+ self.log.debug('({}) timed out {!r}'.format(self.pid, conn_state[0]))
+
+ def log_message(self, message):
+ raise AttributeError('''\
+eventlet.wsgi.server.log_message was deprecated and deleted.
+Please use server.log.info instead.''')
+
+
+try:
+ new = types.InstanceType
+except AttributeError:
+ new = lambda cls: cls.__new__(cls)
+
+
+try:
+ import ssl
+ ACCEPT_EXCEPTIONS = (socket.error, ssl.SSLError)
+ ACCEPT_ERRNO = {errno.EPIPE, errno.ECONNRESET,
+ errno.ESHUTDOWN, ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_SSL}
+except ImportError:
+ ACCEPT_EXCEPTIONS = (socket.error,)
+ ACCEPT_ERRNO = {errno.EPIPE, errno.ECONNRESET, errno.ESHUTDOWN}
+
+
+def socket_repr(sock):
+ scheme = 'http'
+ if hasattr(sock, 'do_handshake'):
+ scheme = 'https'
+
+ name = sock.getsockname()
+ if sock.family == socket.AF_INET:
+ hier_part = '//{}:{}'.format(*name)
+ elif sock.family == socket.AF_INET6:
+ hier_part = '//[{}]:{}'.format(*name[:2])
+ elif sock.family == socket.AF_UNIX:
+ hier_part = name
+ else:
+ hier_part = repr(name)
+
+ return scheme + ':' + hier_part
+
+
+def server(sock, site,
+ log=None,
+ environ=None,
+ max_size=None,
+ max_http_version=DEFAULT_MAX_HTTP_VERSION,
+ protocol=HttpProtocol,
+ server_event=None,
+ minimum_chunk_size=None,
+ log_x_forwarded_for=True,
+ custom_pool=None,
+ keepalive=True,
+ log_output=True,
+ log_format=DEFAULT_LOG_FORMAT,
+ url_length_limit=MAX_REQUEST_LINE,
+ debug=True,
+ socket_timeout=None,
+ capitalize_response_headers=True):
+ """Start up a WSGI server handling requests from the supplied server
+ socket. This function loops forever. The *sock* object will be
+ closed after the server exits, but the underlying file descriptor will
+ remain open, so if you have a dup() of *sock*, it will remain usable.
+
+ .. warning::
+
+ At the moment :func:`server` will always wait for active connections to finish before
+ exiting, even if there's an exception raised inside it
+ (*all* exceptions are handled the same way, including :class:`greenlet.GreenletExit`
+ and those inheriting from `BaseException`).
+
+ While this may not be an issue normally, when it comes to long running HTTP connections
+ (like :mod:`eventlet.websocket`) it will become problematic and calling
+ :meth:`~eventlet.greenthread.GreenThread.wait` on a thread that runs the server may hang,
+ even after using :meth:`~eventlet.greenthread.GreenThread.kill`, as long
+ as there are active connections.
+
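+ Example (a minimal sketch; ``hello_world`` is a placeholder app)::
+
+ import eventlet
+ from eventlet import wsgi
+
+ def hello_world(env, start_response):
+ start_response('200 OK', [('Content-Type', 'text/plain')])
+ return [b'Hello, World!\r\n']
+
+ wsgi.server(eventlet.listen(('', 8090)), hello_world)
+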
+ :param sock: Server socket, must be already bound to a port and listening.
+ :param site: WSGI application function.
+ :param log: logging.Logger instance or file-like object that logs should be written to.
+ If a Logger instance is supplied, messages are sent to the INFO log level.
+ If not specified, sys.stderr is used.
+ :param environ: Additional parameters that go into the environ dictionary of every request.
+ :param max_size: Maximum number of client connections opened at any time by this server.
+ Default is 1024.
+ :param max_http_version: Set to "HTTP/1.0" to make the server pretend it only supports HTTP 1.0.
+ This can help with applications or clients that don't behave properly using HTTP 1.1.
+ :param protocol: Protocol class. Deprecated.
+ :param server_event: Used to collect the Server object. Deprecated.
+ :param minimum_chunk_size: Minimum size in bytes for http chunks. This can be used to improve
+ performance of applications which yield many small strings, though
+ using it technically violates the WSGI spec. This can be overridden
+ on a per request basis by setting environ['eventlet.minimum_write_chunk_size'].
+ :param log_x_forwarded_for: If True (the default), logs the contents of the x-forwarded-for
+ header in addition to the actual client ip address in the 'client_ip' field of the
+ log line.
+ :param custom_pool: A custom GreenPool instance which is used to spawn client green threads.
+ If this is supplied, max_size is ignored.
+ :param keepalive: If set to False or zero, disables keepalives on the server; all connections
+ will be closed after serving one request. If numeric, it will be the timeout used
+ when reading the next request.
+ :param log_output: A Boolean indicating if the server will log data or not.
+ :param log_format: A python format string that is used as the template to generate log lines.
+ The following values can be formatted into it: client_ip, date_time, request_line,
+ status_code, body_length, wall_seconds. The default is a good example of how to
+ use it.
+ :param url_length_limit: A maximum allowed length of the request url. If exceeded, 414 error
+ is returned.
+ :param debug: True if the server should send exception tracebacks to the clients on 500 errors.
+ If False, the server will respond with empty bodies.
+ :param socket_timeout: Timeout for client connections' socket operations. Default None means
+ wait forever.
+ :param capitalize_response_headers: Normalize response headers' names to Foo-Bar.
+ Default is True.
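+
+ For example, a ``log_format`` built from the documented fields (a sketch)::
+
+ log_format='%(client_ip)s "%(request_line)s" %(status_code)s %(body_length)s %(wall_seconds).6f'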
+ """
+ serv = Server(
+ sock, sock.getsockname(),
+ site, log,
+ environ=environ,
+ max_http_version=max_http_version,
+ protocol=protocol,
+ minimum_chunk_size=minimum_chunk_size,
+ log_x_forwarded_for=log_x_forwarded_for,
+ keepalive=keepalive,
+ log_output=log_output,
+ log_format=log_format,
+ url_length_limit=url_length_limit,
+ debug=debug,
+ socket_timeout=socket_timeout,
+ capitalize_response_headers=capitalize_response_headers,
+ )
+ if server_event is not None:
+ warnings.warn(
+ 'eventlet.wsgi.Server() server_event kwarg is deprecated and will be removed soon',
+ DeprecationWarning, stacklevel=2)
+ server_event.send(serv)
+ if max_size is None:
+ max_size = DEFAULT_MAX_SIMULTANEOUS_REQUESTS
+ if custom_pool is not None:
+ pool = custom_pool
+ else:
+ pool = eventlet.GreenPool(max_size)
+
+ if not (hasattr(pool, 'spawn') and hasattr(pool, 'waitall')):
+ raise AttributeError('''\
+eventlet.wsgi.Server pool must provide methods: `spawn`, `waitall`.
+If unsure, use eventlet.GreenPool.''')
+
+ # [addr, socket, state]
+ connections = {}
+
+ def _clean_connection(_, conn):
+ connections.pop(conn[0], None)
+ conn[2] = STATE_CLOSE
+ greenio.shutdown_safe(conn[1])
+ conn[1].close()
+
+ try:
+ serv.log.info('({}) wsgi starting up on {}'.format(serv.pid, socket_repr(sock)))
+ while is_accepting:
+ try:
+ client_socket, client_addr = sock.accept()
+ client_socket.settimeout(serv.socket_timeout)
+ serv.log.debug('({}) accepted {!r}'.format(serv.pid, client_addr))
+ connections[client_addr] = connection = [client_addr, client_socket, STATE_IDLE]
+ (pool.spawn(serv.process_request, connection)
+ .link(_clean_connection, connection))
+ except ACCEPT_EXCEPTIONS as e:
+ if support.get_errno(e) not in ACCEPT_ERRNO:
+ raise
+ else:
+ break
+ except (KeyboardInterrupt, SystemExit):
+ serv.log.info('wsgi exiting')
+ break
+ finally:
+ for cs in connections.values():
+ prev_state = cs[2]
+ cs[2] = STATE_CLOSE
+ if prev_state == STATE_IDLE:
+ greenio.shutdown_safe(cs[1])
+ pool.waitall()
+ serv.log.info('({}) wsgi exited, is_accepting={}'.format(serv.pid, is_accepting))
+ try:
+ # NOTE: It's not clear whether we want this to leave the
+ # socket open or close it. Use cases like Spawning want
+ # the underlying fd to remain open, but if we're going
+ # that far we might as well not bother closing sock at
+ # all.
+ sock.close()
+ except OSError as e:
+ if support.get_errno(e) not in BROKEN_SOCK:
+ traceback.print_exc()
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/README.rst b/.venv/Lib/site-packages/eventlet/zipkin/README.rst
new file mode 100644
index 0000000..b094781
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/zipkin/README.rst
@@ -0,0 +1,130 @@
+eventlet.zipkin
+===============
+
+`Zipkin <http://twitter.github.io/zipkin/>`_ is a distributed tracing system developed at Twitter.
+This package adds a Zipkin-compliant tracing facility to WSGI
+applications served with eventlet.
+
+Why use it?
+From http://twitter.github.io/zipkin/:
+
+"Collecting traces helps developers gain deeper knowledge about how
+certain requests perform in a distributed system. Let's say we're having
+problems with user requests timing out. We can look up traced requests
+that timed out and display it in the web UI. We'll be able to quickly
+find the service responsible for adding the unexpected response time. If
+the service has been annotated adequately we can also find out where in
+that service the issue is happening."
+
+
+Screenshot
+----------
+
+Zipkin web UI screenshots obtained when applying this module to
+OpenStack Swift are in example/.
+
+
+Requirement
+-----------
+
+eventlet.zipkin needs the Python scribe client (``facebook-scribe``)
+and ``thrift`` (>= 0.9),
+because the Zipkin collector speaks the Scribe protocol.
+The command below installs both the Scribe client and thrift.
+
+Install facebook-scribe:
+
+::
+
+ pip install facebook-scribe
+
+**Python**: ``2.7`` (because the current Python Thrift release doesn't support Python 3)
+
+
+How to use
+----------
+
+Add tracing facility to your application
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Apply the monkey patch before you start wsgi server.
+
+.. code:: python
+
+ # Add only 2 lines to your code
+ from eventlet.zipkin import patcher
+ patcher.enable_trace_patch()
+
+ # existing code
+ from eventlet import wsgi
+ wsgi.server(sock, app)
+
+You can pass some parameters to ``enable_trace_patch()`` (see the example after this list):
+
+* host: Scribe daemon IP address (default: '127.0.0.1')
+* port: Scribe daemon port (default: 9410)
+* trace_app_log: A Boolean indicating whether the tracer will also trace the application log. This facility assumes that your application uses the Python standard logging library. (default: False)
+* sampling_rate: A float value (0.0-1.0) that indicates the tracing frequency. If you specify 1.0, all requests are traced and sent to the Zipkin collector. If you specify 0.1, only 1/10 of requests are traced. (default: 1.0)
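+
+For example (a sketch; the host value is a placeholder):
+
+.. code:: python
+
+ patcher.enable_trace_patch(host='192.168.0.10', port=9410,
+ trace_app_log=True, sampling_rate=0.1)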
+
+
+(Option) Annotation API
+~~~~~~~~~~~~~~~~~~~~~~~
+If you want to record additional information,
+you can use the API below from anywhere in your code.
+
+.. code:: python
+
+ from eventlet.zipkin import api
+
+ api.put_annotation('Cache miss for %s' % request)
+ api.put_key_value('key', 'value')
+
+
+
+
+Zipkin simple setup
+-------------------
+
+::
+
+ $ git clone https://github.com/twitter/zipkin.git
+ $ cd zipkin
+ # Open 3 terminals
+ (terminal1) $ bin/collector
+ (terminal2) $ bin/query
+ (terminal3) $ bin/web
+
+Access http://localhost:8080 from your browser.
+
+
+(Option) fluentd
+----------------
+If you want to buffer the tracing data for performance,
+the fluentd scribe plugin is available.
+Since the ``out_scribe`` plugin extends fluentd's Buffer plugin,
+you can customize the buffering parameters in the manner of fluentd.
+The Scribe plugin is included in td-agent by default.
+
+
+Sample: ``/etc/td-agent/td-agent.conf``
+
+::
+
+ # in_scribe
+ <source>
+ type scribe
+ port 9999
+ </source>
+
+ # out_scribe (the match pattern here is illustrative)
+ <match *.**>
+ type scribe
+ host Zipkin_collector_IP
+ port 9410
+ flush_interval 60s
+ buffer_chunk_limit 256m
+ </match>
+
+
+| Also, you need to specify ``patcher.enable_trace_patch(port=9999)`` so that traces go to in_scribe.
+| In this case, trace data flows as below.
+| Your application => Local fluentd in_scribe (9999) => Local fluentd out_scribe =====> Remote Zipkin collector (9410)
+
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/__init__.py b/.venv/Lib/site-packages/eventlet/zipkin/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/__init__.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..e0b7cd4
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/api.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/api.cpython-312.pyc
new file mode 100644
index 0000000..125a47a
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/api.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/client.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/client.cpython-312.pyc
new file mode 100644
index 0000000..c9868fc
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/client.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/greenthread.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/greenthread.cpython-312.pyc
new file mode 100644
index 0000000..76be68c
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/greenthread.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/http.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/http.cpython-312.pyc
new file mode 100644
index 0000000..5960799
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/http.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/log.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/log.cpython-312.pyc
new file mode 100644
index 0000000..43a090e
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/log.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/patcher.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/patcher.cpython-312.pyc
new file mode 100644
index 0000000..137a5f0
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/patcher.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/wsgi.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/wsgi.cpython-312.pyc
new file mode 100644
index 0000000..cc1338d
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/zipkin/__pycache__/wsgi.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/_thrift/README.rst b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/README.rst
new file mode 100644
index 0000000..0317d50
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/README.rst
@@ -0,0 +1,8 @@
+_thrift
+========
+
+* This directory is auto-generated by Thrift Compiler by using
+ https://github.com/twitter/zipkin/blob/master/zipkin-thrift/src/main/thrift/com/twitter/zipkin/zipkinCore.thrift
+
+* Do not modify this directory.
+
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/_thrift/__init__.py b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/_thrift/__pycache__/__init__.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..843fcdf
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore.thrift b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore.thrift
new file mode 100644
index 0000000..0787ca8
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore.thrift
@@ -0,0 +1,55 @@
+# Copyright 2012 Twitter Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+namespace java com.twitter.zipkin.gen
+namespace rb Zipkin
+
+//************** Collection related structs **************
+
+// these are the annotations we always expect to find in a span
+const string CLIENT_SEND = "cs"
+const string CLIENT_RECV = "cr"
+const string SERVER_SEND = "ss"
+const string SERVER_RECV = "sr"
+
+// this represents a host and port in a network
+struct Endpoint {
+ 1: i32 ipv4,
+ 2: i16 port // beware that this will give us negative ports. some conversion needed
+ 3: string service_name // which service did this operation happen on?
+}
+
+// some event took place, either one by the framework or by the user
+struct Annotation {
+ 1: i64 timestamp // microseconds from epoch
+ 2: string value // what happened at the timestamp?
+ 3: optional Endpoint host // host this happened on
+}
+
+enum AnnotationType { BOOL, BYTES, I16, I32, I64, DOUBLE, STRING }
+
+struct BinaryAnnotation {
+ 1: string key,
+ 2: binary value,
+ 3: AnnotationType annotation_type,
+ 4: optional Endpoint host
+}
+
+struct Span {
+ 1: i64 trace_id // unique trace id, use for all spans in trace
+ 3: string name, // span name, rpc method for example
+ 4: i64 id, // unique span id, only used for this span
+ 5: optional i64 parent_id, // parent span id
+ 6: list annotations, // list of all annotations/events that occured
+ 8: list binary_annotations // any binary annotations
+}
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore/__init__.py b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore/__init__.py
new file mode 100644
index 0000000..adefd8e
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore/__init__.py
@@ -0,0 +1 @@
+__all__ = ['ttypes', 'constants']
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore/__pycache__/__init__.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..5e9d35d
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore/__pycache__/constants.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore/__pycache__/constants.cpython-312.pyc
new file mode 100644
index 0000000..5dfac9c
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore/__pycache__/constants.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore/__pycache__/ttypes.cpython-312.pyc b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore/__pycache__/ttypes.cpython-312.pyc
new file mode 100644
index 0000000..73341b7
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore/__pycache__/ttypes.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore/constants.py b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore/constants.py
new file mode 100644
index 0000000..3e04f77
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore/constants.py
@@ -0,0 +1,14 @@
+#
+# Autogenerated by Thrift Compiler (0.8.0)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#
+
+from thrift.Thrift import TType, TMessageType, TException
+from .ttypes import *
+
+CLIENT_SEND = "cs"
+CLIENT_RECV = "cr"
+SERVER_SEND = "ss"
+SERVER_RECV = "sr"
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore/ttypes.py b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore/ttypes.py
new file mode 100644
index 0000000..418911f
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/zipkin/_thrift/zipkinCore/ttypes.py
@@ -0,0 +1,452 @@
+#
+# Autogenerated by Thrift Compiler (0.8.0)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#
+
+from thrift.Thrift import TType, TMessageType, TException
+
+from thrift.transport import TTransport
+from thrift.protocol import TBinaryProtocol, TProtocol
+try:
+ from thrift.protocol import fastbinary
+except ImportError:
+ fastbinary = None
+
+
+class AnnotationType:
+ BOOL = 0
+ BYTES = 1
+ I16 = 2
+ I32 = 3
+ I64 = 4
+ DOUBLE = 5
+ STRING = 6
+
+ _VALUES_TO_NAMES = {
+ 0: "BOOL",
+ 1: "BYTES",
+ 2: "I16",
+ 3: "I32",
+ 4: "I64",
+ 5: "DOUBLE",
+ 6: "STRING",
+ }
+
+ _NAMES_TO_VALUES = {
+ "BOOL": 0,
+ "BYTES": 1,
+ "I16": 2,
+ "I32": 3,
+ "I64": 4,
+ "DOUBLE": 5,
+ "STRING": 6,
+ }
+
+
+class Endpoint:
+ """
+ Attributes:
+ - ipv4
+ - port
+ - service_name
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.I32, 'ipv4', None, None, ), # 1
+ (2, TType.I16, 'port', None, None, ), # 2
+ (3, TType.STRING, 'service_name', None, None, ), # 3
+ )
+
+ def __init__(self, ipv4=None, port=None, service_name=None,):
+ self.ipv4 = ipv4
+ self.port = port
+ self.service_name = service_name
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.I32:
+ self.ipv4 = iprot.readI32();
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.I16:
+ self.port = iprot.readI16();
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRING:
+ self.service_name = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('Endpoint')
+ if self.ipv4 is not None:
+ oprot.writeFieldBegin('ipv4', TType.I32, 1)
+ oprot.writeI32(self.ipv4)
+ oprot.writeFieldEnd()
+ if self.port is not None:
+ oprot.writeFieldBegin('port', TType.I16, 2)
+ oprot.writeI16(self.port)
+ oprot.writeFieldEnd()
+ if self.service_name is not None:
+ oprot.writeFieldBegin('service_name', TType.STRING, 3)
+ oprot.writeString(self.service_name)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class Annotation:
+ """
+ Attributes:
+ - timestamp
+ - value
+ - host
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.I64, 'timestamp', None, None, ), # 1
+ (2, TType.STRING, 'value', None, None, ), # 2
+ (3, TType.STRUCT, 'host', (Endpoint, Endpoint.thrift_spec), None, ), # 3
+ )
+
+ def __init__(self, timestamp=None, value=None, host=None,):
+ self.timestamp = timestamp
+ self.value = value
+ self.host = host
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.I64:
+ self.timestamp = iprot.readI64();
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.value = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRUCT:
+ self.host = Endpoint()
+ self.host.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('Annotation')
+ if self.timestamp is not None:
+ oprot.writeFieldBegin('timestamp', TType.I64, 1)
+ oprot.writeI64(self.timestamp)
+ oprot.writeFieldEnd()
+ if self.value is not None:
+ oprot.writeFieldBegin('value', TType.STRING, 2)
+ oprot.writeString(self.value)
+ oprot.writeFieldEnd()
+ if self.host is not None:
+ oprot.writeFieldBegin('host', TType.STRUCT, 3)
+ self.host.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class BinaryAnnotation:
+ """
+ Attributes:
+ - key
+ - value
+ - annotation_type
+ - host
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'key', None, None, ), # 1
+ (2, TType.STRING, 'value', None, None, ), # 2
+ (3, TType.I32, 'annotation_type', None, None, ), # 3
+ (4, TType.STRUCT, 'host', (Endpoint, Endpoint.thrift_spec), None, ), # 4
+ )
+
+ def __init__(self, key=None, value=None, annotation_type=None, host=None,):
+ self.key = key
+ self.value = value
+ self.annotation_type = annotation_type
+ self.host = host
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.key = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.value = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.I32:
+ self.annotation_type = iprot.readI32();
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.STRUCT:
+ self.host = Endpoint()
+ self.host.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('BinaryAnnotation')
+ if self.key is not None:
+ oprot.writeFieldBegin('key', TType.STRING, 1)
+ oprot.writeString(self.key)
+ oprot.writeFieldEnd()
+ if self.value is not None:
+ oprot.writeFieldBegin('value', TType.STRING, 2)
+ oprot.writeString(self.value)
+ oprot.writeFieldEnd()
+ if self.annotation_type is not None:
+ oprot.writeFieldBegin('annotation_type', TType.I32, 3)
+ oprot.writeI32(self.annotation_type)
+ oprot.writeFieldEnd()
+ if self.host is not None:
+ oprot.writeFieldBegin('host', TType.STRUCT, 4)
+ self.host.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class Span:
+ """
+ Attributes:
+ - trace_id
+ - name
+ - id
+ - parent_id
+ - annotations
+ - binary_annotations
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.I64, 'trace_id', None, None, ), # 1
+ None, # 2
+ (3, TType.STRING, 'name', None, None, ), # 3
+ (4, TType.I64, 'id', None, None, ), # 4
+ (5, TType.I64, 'parent_id', None, None, ), # 5
+ (6, TType.LIST, 'annotations', (TType.STRUCT,(Annotation, Annotation.thrift_spec)), None, ), # 6
+ None, # 7
+ (8, TType.LIST, 'binary_annotations', (TType.STRUCT,(BinaryAnnotation, BinaryAnnotation.thrift_spec)), None, ), # 8
+ )
+
+ def __init__(self, trace_id=None, name=None, id=None, parent_id=None, annotations=None, binary_annotations=None,):
+ self.trace_id = trace_id
+ self.name = name
+ self.id = id
+ self.parent_id = parent_id
+ self.annotations = annotations
+ self.binary_annotations = binary_annotations
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.I64:
+ self.trace_id = iprot.readI64();
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRING:
+ self.name = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.I64:
+ self.id = iprot.readI64();
+ else:
+ iprot.skip(ftype)
+ elif fid == 5:
+ if ftype == TType.I64:
+ self.parent_id = iprot.readI64();
+ else:
+ iprot.skip(ftype)
+ elif fid == 6:
+ if ftype == TType.LIST:
+ self.annotations = []
+ (_etype3, _size0) = iprot.readListBegin()
+ for _i4 in range(_size0):
+ _elem5 = Annotation()
+ _elem5.read(iprot)
+ self.annotations.append(_elem5)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ elif fid == 8:
+ if ftype == TType.LIST:
+ self.binary_annotations = []
+ (_etype9, _size6) = iprot.readListBegin()
+ for _i10 in range(_size6):
+ _elem11 = BinaryAnnotation()
+ _elem11.read(iprot)
+ self.binary_annotations.append(_elem11)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('Span')
+ if self.trace_id is not None:
+ oprot.writeFieldBegin('trace_id', TType.I64, 1)
+ oprot.writeI64(self.trace_id)
+ oprot.writeFieldEnd()
+ if self.name is not None:
+ oprot.writeFieldBegin('name', TType.STRING, 3)
+ oprot.writeString(self.name)
+ oprot.writeFieldEnd()
+ if self.id is not None:
+ oprot.writeFieldBegin('id', TType.I64, 4)
+ oprot.writeI64(self.id)
+ oprot.writeFieldEnd()
+ if self.parent_id is not None:
+ oprot.writeFieldBegin('parent_id', TType.I64, 5)
+ oprot.writeI64(self.parent_id)
+ oprot.writeFieldEnd()
+ if self.annotations is not None:
+ oprot.writeFieldBegin('annotations', TType.LIST, 6)
+ oprot.writeListBegin(TType.STRUCT, len(self.annotations))
+ for iter12 in self.annotations:
+ iter12.write(oprot)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ if self.binary_annotations is not None:
+ oprot.writeFieldBegin('binary_annotations', TType.LIST, 8)
+ oprot.writeListBegin(TType.STRUCT, len(self.binary_annotations))
+ for iter13 in self.binary_annotations:
+ iter13.write(oprot)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.items()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/api.py b/.venv/Lib/site-packages/eventlet/zipkin/api.py
new file mode 100644
index 0000000..8edde5c
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/zipkin/api.py
@@ -0,0 +1,187 @@
+import os
+import sys
+import time
+import struct
+import socket
+import random
+
+from eventlet.green import threading
+from eventlet.zipkin._thrift.zipkinCore import ttypes
+from eventlet.zipkin._thrift.zipkinCore.constants import SERVER_SEND
+
+
+client = None
+_tls = threading.local() # thread local storage
+
+
+def put_annotation(msg, endpoint=None):
+ """ This is annotation API.
+ You can add your own annotation from in your code.
+ Annotation is recorded with timestamp automatically.
+ e.g.) put_annotation('cache hit for %s' % request)
+
+ :param msg: String message
+ :param endpoint: host info
+ """
+ if is_sample():
+ a = ZipkinDataBuilder.build_annotation(msg, endpoint)
+ trace_data = get_trace_data()
+ trace_data.add_annotation(a)
+
+
+def put_key_value(key, value, endpoint=None):
+ """ This is binary annotation API.
+ You can add your own key-value extra information from in your code.
+ Key-value doesn't have a time component.
+ e.g.) put_key_value('http.uri', '/hoge/index.html')
+
+ :param key: String
+ :param value: String
+ :param endpoint: host info
+ """
+ if is_sample():
+ b = ZipkinDataBuilder.build_binary_annotation(key, value, endpoint)
+ trace_data = get_trace_data()
+ trace_data.add_binary_annotation(b)
+
+
+def is_tracing():
+ """ Return whether the current thread is tracking or not """
+ return hasattr(_tls, 'trace_data')
+
+
+def is_sample():
+ """ Return whether it should record trace information
+ for the request or not
+ """
+ return is_tracing() and _tls.trace_data.sampled
+
+
+def get_trace_data():
+ if is_tracing():
+ return _tls.trace_data
+
+
+def set_trace_data(trace_data):
+ _tls.trace_data = trace_data
+
+
+def init_trace_data():
+ if is_tracing():
+ del _tls.trace_data
+
+
+def _uniq_id():
+ """
+ Create a random 64-bit signed integer appropriate
+ for use as trace and span IDs.
+ XXX: By experimentation zipkin has trouble recording traces with ids
+ larger than (2 ** 56) - 1
+ """
+ return random.randint(0, (2 ** 56) - 1)
+
+
+def generate_trace_id():
+ return _uniq_id()
+
+
+def generate_span_id():
+ return _uniq_id()
+
+
+class TraceData:
+
+ END_ANNOTATION = SERVER_SEND
+
+ def __init__(self, name, trace_id, span_id, parent_id, sampled, endpoint):
+ """
+ :param name: RPC name (String)
+ :param trace_id: int
+ :param span_id: int
+ :param parent_id: int or None
+ :param sampled: lets the downstream servers know
+ whether trace data should be recorded for the request (bool)
+ :param endpoint: zipkin._thrift.zipkinCore.ttypes.EndPoint
+ """
+ self.name = name
+ self.trace_id = trace_id
+ self.span_id = span_id
+ self.parent_id = parent_id
+ self.sampled = sampled
+ self.endpoint = endpoint
+ self.annotations = []
+ self.bannotations = []
+ self._done = False
+
+ def add_annotation(self, annotation):
+ if annotation.host is None:
+ annotation.host = self.endpoint
+ if not self._done:
+ self.annotations.append(annotation)
+ if annotation.value == self.END_ANNOTATION:
+ self.flush()
+
+ def add_binary_annotation(self, bannotation):
+ if bannotation.host is None:
+ bannotation.host = self.endpoint
+ if not self._done:
+ self.bannotations.append(bannotation)
+
+ def flush(self):
+ span = ZipkinDataBuilder.build_span(name=self.name,
+ trace_id=self.trace_id,
+ span_id=self.span_id,
+ parent_id=self.parent_id,
+ annotations=self.annotations,
+ bannotations=self.bannotations)
+ client.send_to_collector(span)
+ self.annotations = []
+ self.bannotations = []
+ self._done = True
+
+
+class ZipkinDataBuilder:
+ @staticmethod
+ def build_span(name, trace_id, span_id, parent_id,
+ annotations, bannotations):
+ return ttypes.Span(
+ name=name,
+ trace_id=trace_id,
+ id=span_id,
+ parent_id=parent_id,
+ annotations=annotations,
+ binary_annotations=bannotations
+ )
+
+ @staticmethod
+ def build_annotation(value, endpoint=None):
+ if isinstance(value, str):
+ value = value.encode('utf-8')
+ assert isinstance(value, bytes)
+ # the Annotation timestamp is an i64 of microseconds since epoch,
+ # so coerce the float from time.time() to an int
+ return ttypes.Annotation(int(time.time() * 1000 * 1000),
+ value, endpoint)
+
+ @staticmethod
+ def build_binary_annotation(key, value, endpoint=None):
+ annotation_type = ttypes.AnnotationType.STRING
+ return ttypes.BinaryAnnotation(key, value, annotation_type, endpoint)
+
+ @staticmethod
+ def build_endpoint(ipv4=None, port=None, service_name=None):
+ if ipv4 is not None:
+ ipv4 = ZipkinDataBuilder._ipv4_to_int(ipv4)
+ if service_name is None:
+ service_name = ZipkinDataBuilder._get_script_name()
+ return ttypes.Endpoint(
+ ipv4=ipv4,
+ port=port,
+ service_name=service_name
+ )
+
+ @staticmethod
+ def _ipv4_to_int(ipv4):
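+ # e.g. '127.0.0.1' -> 2130706433 ('!i' unpacks a signed int in network byte order)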
+ return struct.unpack('!i', socket.inet_aton(ipv4))[0]
+
+ @staticmethod
+ def _get_script_name():
+ return os.path.basename(sys.argv[0])
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/client.py b/.venv/Lib/site-packages/eventlet/zipkin/client.py
new file mode 100644
index 0000000..faff244
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/zipkin/client.py
@@ -0,0 +1,56 @@
+import base64
+import warnings
+
+from scribe import scribe
+from thrift.transport import TTransport, TSocket
+from thrift.protocol import TBinaryProtocol
+
+from eventlet import GreenPile
+
+
+CATEGORY = 'zipkin'
+
+
+class ZipkinClient:
+
+ def __init__(self, host='127.0.0.1', port=9410):
+ """
+ :param host: zipkin collector IP address (default '127.0.0.1')
+ :param port: zipkin collector port (default 9410)
+ """
+ self.host = host
+ self.port = port
+ self.pile = GreenPile(1)
+ self._connect()
+
+ def _connect(self):
+ socket = TSocket.TSocket(self.host, self.port)
+ self.transport = TTransport.TFramedTransport(socket)
+ protocol = TBinaryProtocol.TBinaryProtocol(self.transport,
+ False, False)
+ self.scribe_client = scribe.Client(protocol)
+ try:
+ self.transport.open()
+ except TTransport.TTransportException as e:
+ warnings.warn(str(e))
+
+ def _build_message(self, thrift_obj):
+ trans = TTransport.TMemoryBuffer()
+ protocol = TBinaryProtocol.TBinaryProtocolAccelerated(trans=trans)
+ thrift_obj.write(protocol)
+ return base64.b64encode(trans.getvalue())
+
+ def send_to_collector(self, span):
+ self.pile.spawn(self._send, span)
+
+ def _send(self, span):
+ log_entry = scribe.LogEntry(CATEGORY, self._build_message(span))
+ try:
+ self.scribe_client.Log([log_entry])
+ except Exception as e:
+ msg = 'ZipkinClient send error %s' % str(e)
+ warnings.warn(msg)
+ self._connect()
+
+ def close(self):
+ self.transport.close()
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/example/ex1.png b/.venv/Lib/site-packages/eventlet/zipkin/example/ex1.png
new file mode 100644
index 0000000..7f7a049
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/zipkin/example/ex1.png differ
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/example/ex2.png b/.venv/Lib/site-packages/eventlet/zipkin/example/ex2.png
new file mode 100644
index 0000000..19dbc3a
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/zipkin/example/ex2.png differ
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/example/ex3.png b/.venv/Lib/site-packages/eventlet/zipkin/example/ex3.png
new file mode 100644
index 0000000..5ff9860
Binary files /dev/null and b/.venv/Lib/site-packages/eventlet/zipkin/example/ex3.png differ
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/greenthread.py b/.venv/Lib/site-packages/eventlet/zipkin/greenthread.py
new file mode 100644
index 0000000..37e12d6
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/zipkin/greenthread.py
@@ -0,0 +1,33 @@
+from eventlet import greenthread
+
+from eventlet.zipkin import api
+
+
+__original_init__ = greenthread.GreenThread.__init__
+__original_main__ = greenthread.GreenThread.main
+
+
+def _patched__init(self, parent):
+ # parent thread saves current TraceData from tls to self
+ if api.is_tracing():
+ self.trace_data = api.get_trace_data()
+
+ __original_init__(self, parent)
+
+
+def _patched_main(self, function, args, kwargs):
+ # child thread inherits TraceData
+ if hasattr(self, 'trace_data'):
+ api.set_trace_data(self.trace_data)
+
+ __original_main__(self, function, args, kwargs)
+
+
+def patch():
+ greenthread.GreenThread.__init__ = _patched__init
+ greenthread.GreenThread.main = _patched_main
+
+
+def unpatch():
+ greenthread.GreenThread.__init__ = __original_init__
+ greenthread.GreenThread.main = __original_main__
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/http.py b/.venv/Lib/site-packages/eventlet/zipkin/http.py
new file mode 100644
index 0000000..f981a17
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/zipkin/http.py
@@ -0,0 +1,29 @@
+import warnings
+
+from eventlet.green import httplib
+from eventlet.zipkin import api
+
+
+# see https://twitter.github.io/zipkin/Instrumenting.html
+HDR_TRACE_ID = 'X-B3-TraceId'
+HDR_SPAN_ID = 'X-B3-SpanId'
+HDR_PARENT_SPAN_ID = 'X-B3-ParentSpanId'
+HDR_SAMPLED = 'X-B3-Sampled'
+
+
+def patch():
+ warnings.warn("Since current Python thrift release \
+ doesn't support Python 3, eventlet.zipkin.http \
+ doesn't also support Python 3 (http.client)")
+
+
+def unpatch():
+ pass
+
+
+def hex_str(n):
+ """
+ Thrift uses a binary representation of trace and span ids;
+ HTTP headers use a hexadecimal representation of the same.
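+ e.g. hex_str(12345) -> '0000000000003039'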
+ """
+ return '%0.16x' % (n,)
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/log.py b/.venv/Lib/site-packages/eventlet/zipkin/log.py
new file mode 100644
index 0000000..b7f9d32
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/zipkin/log.py
@@ -0,0 +1,19 @@
+import logging
+
+from eventlet.zipkin import api
+
+
+__original_handle__ = logging.Logger.handle
+
+
+def _patched_handle(self, record):
+ __original_handle__(self, record)
+ api.put_annotation(record.getMessage())
+
+
+def patch():
+ logging.Logger.handle = _patched_handle
+
+
+def unpatch():
+ logging.Logger.handle = __original_handle__
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/patcher.py b/.venv/Lib/site-packages/eventlet/zipkin/patcher.py
new file mode 100644
index 0000000..8e7d8ad
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/zipkin/patcher.py
@@ -0,0 +1,41 @@
+from eventlet.zipkin import http
+from eventlet.zipkin import wsgi
+from eventlet.zipkin import greenthread
+from eventlet.zipkin import log
+from eventlet.zipkin import api
+from eventlet.zipkin.client import ZipkinClient
+
+
+def enable_trace_patch(host='127.0.0.1', port=9410,
+ trace_app_log=False, sampling_rate=1.0):
+ """ Apply monkey patch to trace your WSGI application.
+
+ :param host: Scribe daemon IP address (default: '127.0.0.1')
+ :param port: Scribe daemon port (default: 9410)
+ :param trace_app_log: A Boolean indicating whether the tracer will
+ also trace the application log. This facility assumes that
+ your application uses the Python standard logging library.
+ (default: False)
+ :param sampling_rate: A float value (0.0-1.0) that indicates
+ the tracing frequency. If you specify 1.0, all requests
+ are traced (and sent to the Zipkin collector).
+ If you specify 0.1, only 1/10 of requests are traced. (default: 1.0)
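+
+ Example (a sketch; the host value is a placeholder)::
+
+ from eventlet.zipkin import patcher
+ patcher.enable_trace_patch(host='192.168.0.10', sampling_rate=0.5)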
+ """
+ api.client = ZipkinClient(host, port)
+
+ # monkey patch for adding tracing facility
+ wsgi.patch(sampling_rate)
+ http.patch()
+ greenthread.patch()
+
+ # monkey patch for capturing application log
+ if trace_app_log:
+ log.patch()
+
+
+def disable_trace_patch():
+ http.unpatch()
+ wsgi.unpatch()
+ greenthread.unpatch()
+ log.unpatch()
+ api.client.close()
diff --git a/.venv/Lib/site-packages/eventlet/zipkin/wsgi.py b/.venv/Lib/site-packages/eventlet/zipkin/wsgi.py
new file mode 100644
index 0000000..402d142
--- /dev/null
+++ b/.venv/Lib/site-packages/eventlet/zipkin/wsgi.py
@@ -0,0 +1,78 @@
+import random
+
+from eventlet import wsgi
+from eventlet.zipkin import api
+from eventlet.zipkin._thrift.zipkinCore.constants import \
+ SERVER_RECV, SERVER_SEND
+from eventlet.zipkin.http import \
+ HDR_TRACE_ID, HDR_SPAN_ID, HDR_PARENT_SPAN_ID, HDR_SAMPLED
+
+
+_sampler = None
+__original_handle_one_response__ = wsgi.HttpProtocol.handle_one_response
+
+
+def _patched_handle_one_response(self):
+ api.init_trace_data()
+ trace_id = int_or_none(self.headers.getheader(HDR_TRACE_ID))
+ span_id = int_or_none(self.headers.getheader(HDR_SPAN_ID))
+ parent_id = int_or_none(self.headers.getheader(HDR_PARENT_SPAN_ID))
+ sampled = bool_or_none(self.headers.getheader(HDR_SAMPLED))
+ if trace_id is None: # front-end server
+ trace_id = span_id = api.generate_trace_id()
+ parent_id = None
+ sampled = _sampler.sampling()
+ ip, port = self.request.getsockname()[:2]
+ ep = api.ZipkinDataBuilder.build_endpoint(ip, port)
+ trace_data = api.TraceData(name=self.command,
+ trace_id=trace_id,
+ span_id=span_id,
+ parent_id=parent_id,
+ sampled=sampled,
+ endpoint=ep)
+ api.set_trace_data(trace_data)
+ api.put_annotation(SERVER_RECV)
+ api.put_key_value('http.uri', self.path)
+
+ __original_handle_one_response__(self)
+
+ if api.is_sample():
+ api.put_annotation(SERVER_SEND)
+
+
+class Sampler:
+ def __init__(self, sampling_rate):
+ self.sampling_rate = sampling_rate
+
+ def sampling(self):
+ # avoid generating unneeded random numbers
+ if self.sampling_rate == 1.0:
+ return True
+ r = random.random()
+ if r < self.sampling_rate:
+ return True
+ return False
+
+
+def int_or_none(val):
+ if val is None:
+ return None
+ return int(val, 16)
+
+
+def bool_or_none(val):
+ if val == '1':
+ return True
+ if val == '0':
+ return False
+ return None
+
+
+def patch(sampling_rate):
+ global _sampler
+ _sampler = Sampler(sampling_rate)
+ wsgi.HttpProtocol.handle_one_response = _patched_handle_one_response
+
+
+def unpatch():
+ wsgi.HttpProtocol.handle_one_response = __original_handle_one_response__
diff --git a/.venv/Lib/site-packages/flask_socketio/__init__.py b/.venv/Lib/site-packages/flask_socketio/__init__.py
new file mode 100644
index 0000000..c065ae3
--- /dev/null
+++ b/.venv/Lib/site-packages/flask_socketio/__init__.py
@@ -0,0 +1,1117 @@
+from functools import wraps
+import os
+import sys
+
+# make sure gevent-socketio is not installed, as it conflicts with
+# python-socketio
+gevent_socketio_found = True
+try:
+ from socketio import socketio_manage # noqa: F401
+except ImportError:
+ gevent_socketio_found = False
+if gevent_socketio_found:
+ print('The gevent-socketio package is incompatible with this version of '
+ 'the Flask-SocketIO extension. Please uninstall it, and then '
+ 'install the latest version of python-socketio in its place.')
+ sys.exit(1)
+
+import flask
+from flask import has_request_context, json as flask_json
+from flask.sessions import SessionMixin
+import socketio
+from socketio.exceptions import ConnectionRefusedError # noqa: F401
+from werkzeug.debug import DebuggedApplication
+from werkzeug._reloader import run_with_reloader
+
+from .namespace import Namespace
+from .test_client import SocketIOTestClient
+
+
+class _SocketIOMiddleware(socketio.WSGIApp):
+ """This WSGI middleware simply exposes the Flask application in the WSGI
+ environment before executing the request.
+ """
+ def __init__(self, socketio_app, flask_app, socketio_path='socket.io'):
+ self.flask_app = flask_app
+ super(_SocketIOMiddleware, self).__init__(socketio_app,
+ flask_app.wsgi_app,
+ socketio_path=socketio_path)
+
+ def __call__(self, environ, start_response):
+ environ = environ.copy()
+ environ['flask.app'] = self.flask_app
+ return super(_SocketIOMiddleware, self).__call__(environ,
+ start_response)
+
+
+class _ManagedSession(dict, SessionMixin):
+ """This class is used for user sessions that are managed by
+ Flask-SocketIO. It is a simple dict, extended with the Flask session
+ attributes."""
+ pass
+
+
+class SocketIO(object):
+ """Create a Flask-SocketIO server.
+
+ :param app: The flask application instance. If the application instance
+ isn't known at the time this class is instantiated, then call
+ ``socketio.init_app(app)`` once the application instance is
+ available.
+ :param manage_session: If set to ``True``, this extension manages the user
+ session for Socket.IO events. If set to ``False``,
+ Flask's own session management is used. When using
+ Flask's cookie based sessions it is recommended that
+ you leave this set to the default of ``True``. When
+ using server-side sessions, a ``False`` setting
+ enables sharing the user session between HTTP routes
+ and Socket.IO events.
+ :param message_queue: A connection URL for a message queue service the
+ server can use for multi-process communication. A
+ message queue is not required when using a single
+ server process.
+ :param channel: The channel name, when using a message queue. If a channel
+ isn't specified, a default channel will be used. If
+ multiple clusters of SocketIO processes need to use the
+ same message queue without interfering with each other,
+ then each cluster should use a different channel.
+ :param path: The path where the Socket.IO server is exposed. Defaults to
+ ``'socket.io'``. Leave this as is unless you know what you are
+ doing.
+ :param resource: Alias to ``path``.
+ :param kwargs: Socket.IO and Engine.IO server options.
+
+ The Socket.IO server options are detailed below:
+
+ :param client_manager: The client manager instance that will manage the
+ client list. When this is omitted, the client list
+ is stored in an in-memory structure, so the use of
+ multiple connected servers is not possible. In most
+ cases, this argument does not need to be set
+ explicitly.
+ :param logger: To enable logging set to ``True`` or pass a logger object to
+ use. To disable logging set to ``False``. The default is
+ ``False``. Note that fatal errors will be logged even when
+ ``logger`` is ``False``.
+ :param json: An alternative json module to use for encoding and decoding
+ packets. Custom json modules must have ``dumps`` and ``loads``
+ functions that are compatible with the standard library
+ versions. To use the same json encoder and decoder as a Flask
+ application, use ``flask.json``.
+ :param async_handlers: If set to ``True``, event handlers for a client are
+ executed in separate threads. To run handlers for a
+ client synchronously, set to ``False``. The default
+ is ``True``.
+ :param always_connect: When set to ``False``, new connections are
+ provisional until the connect handler returns
+ something other than ``False``, at which point they
+ are accepted. When set to ``True``, connections are
+ immediately accepted, and then if the connect
+ handler returns ``False`` a disconnect is issued.
+ Set to ``True`` if you need to emit events from the
+ connect handler and your client is confused when it
+ receives events before the connection acceptance.
+ In any other case use the default of ``False``.
+
+ The Engine.IO server configuration supports the following settings:
+
+ :param async_mode: The asynchronous model to use. See the Deployment
+ section in the documentation for a description of the
+ available options. Valid async modes are ``threading``,
+ ``eventlet``, ``gevent`` and ``gevent_uwsgi``. If this
+ argument is not given, ``eventlet`` is tried first, then
+ ``gevent_uwsgi``, then ``gevent``, and finally
+ ``threading``. The first async mode that has all its
+ dependencies installed is the one that is chosen.
+ :param ping_interval: The interval in seconds at which the server pings
+ the client. The default is 25 seconds. For advanced
+ control, a two element tuple can be given, where
+ the first number is the ping interval and the second
+ is a grace period added by the server.
+ :param ping_timeout: The time in seconds that the client waits for the
+ server to respond before disconnecting. The default
+ is 5 seconds.
+ :param max_http_buffer_size: The maximum size of a message when using the
+ polling transport. The default is 1,000,000
+ bytes.
+ :param allow_upgrades: Whether to allow transport upgrades or not. The
+ default is ``True``.
+ :param http_compression: Whether to compress packets when using the
+ polling transport. The default is ``True``.
+ :param compression_threshold: Only compress messages when their byte size
+ is greater than this value. The default is
+ 1024 bytes.
+ :param cookie: If set to a string, it is the name of the HTTP cookie the
+ server sends back to the client containing the client
+ session id. If set to a dictionary, the ``'name'`` key
+ contains the cookie name and other keys define cookie
+ attributes, where the value of each attribute can be a
+ string, a callable with no arguments, or a boolean. If set
+ to ``None`` (the default), a cookie is not sent to the
+ client.
+ :param cors_allowed_origins: Origin or list of origins that are allowed to
+ connect to this server. Only the same origin
+ is allowed by default. Set this argument to
+ ``'*'`` to allow all origins, or to ``[]`` to
+ disable CORS handling.
+ :param cors_credentials: Whether credentials (cookies, authentication) are
+ allowed in requests to this server. The default is
+ ``True``.
+ :param monitor_clients: If set to ``True``, a background task will ensure
+ inactive clients are closed. Set to ``False`` to
+ disable the monitoring task (not recommended). The
+ default is ``True``.
+ :param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
+ a logger object to use. To disable logging set to
+ ``False``. The default is ``False``. Note that
+ fatal errors are logged even when
+ ``engineio_logger`` is ``False``.
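+
+ Example of standard initialization (a minimal sketch)::
+
+ from flask import Flask
+ from flask_socketio import SocketIO
+
+ app = Flask(__name__)
+ socketio = SocketIO(app)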
+ """
+
+ def __init__(self, app=None, **kwargs):
+ self.server = None
+ self.server_options = {}
+ self.wsgi_server = None
+ self.handlers = []
+ self.namespace_handlers = []
+ self.exception_handlers = {}
+ self.default_exception_handler = None
+ self.manage_session = True
+ # We can call init_app when:
+ # - we were given the Flask app instance (standard initialization)
+ # - we were not given the app, but we were given a message_queue
+ # (standard initialization for auxiliary process)
+ # In all other cases we collect the arguments and assume the client
+ # will call init_app from an app factory function.
+ if app is not None or 'message_queue' in kwargs:
+ self.init_app(app, **kwargs)
+ else:
+ self.server_options.update(kwargs)
+
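+ # A common pattern (an illustrative sketch, not the only option) is
+ # the Flask application factory, where the extension is created
+ # without an app and bound to one later:
+ #
+ #     socketio = SocketIO()
+ #
+ #     def create_app():
+ #         app = Flask(__name__)
+ #         socketio.init_app(app)
+ #         return app
+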
+ def init_app(self, app, **kwargs):
+ if app is not None:
+ if not hasattr(app, 'extensions'):
+ app.extensions = {} # pragma: no cover
+ app.extensions['socketio'] = self
+ self.server_options.update(kwargs)
+ self.manage_session = self.server_options.pop('manage_session',
+ self.manage_session)
+
+ if 'client_manager' not in kwargs:
+ url = self.server_options.get('message_queue', None)
+ channel = self.server_options.pop('channel', 'flask-socketio')
+ write_only = app is None
+ if url:
+ if url.startswith(('redis://', 'rediss://')):
+ queue_class = socketio.RedisManager
+ elif url.startswith('kafka://'):
+ queue_class = socketio.KafkaManager
+ elif url.startswith('zmq'):
+ queue_class = socketio.ZmqManager
+ else:
+ queue_class = socketio.KombuManager
+ queue = queue_class(url, channel=channel,
+ write_only=write_only)
+ self.server_options['client_manager'] = queue
+
+ if 'json' in self.server_options and \
+ self.server_options['json'] == flask_json:
+ # flask's json module is tricky to use because its output
+ # changes when it is invoked inside or outside the app context
+ # so here to prevent any ambiguities we replace it with wrappers
+ # that ensure that the app context is always present
+ class FlaskSafeJSON(object):
+ @staticmethod
+ def dumps(*args, **kwargs):
+ with app.app_context():
+ return flask_json.dumps(*args, **kwargs)
+
+ @staticmethod
+ def loads(*args, **kwargs):
+ with app.app_context():
+ return flask_json.loads(*args, **kwargs)
+
+ self.server_options['json'] = FlaskSafeJSON
+
+ resource = self.server_options.pop('path', None) or \
+ self.server_options.pop('resource', None) or 'socket.io'
+ if resource.startswith('/'):
+ resource = resource[1:]
+ if os.environ.get('FLASK_RUN_FROM_CLI'):
+ if self.server_options.get('async_mode') is None:
+ self.server_options['async_mode'] = 'threading'
+ self.server = socketio.Server(**self.server_options)
+ self.async_mode = self.server.async_mode
+ for handler in self.handlers:
+ self.server.on(handler[0], handler[1], namespace=handler[2])
+ for namespace_handler in self.namespace_handlers:
+ self.server.register_namespace(namespace_handler)
+
+ if app is not None:
+ # here we attach the SocketIO middleware to the SocketIO object so
+ # it can be referenced later if debug middleware needs to be
+ # inserted
+ self.sockio_mw = _SocketIOMiddleware(self.server, app,
+ socketio_path=resource)
+ app.wsgi_app = self.sockio_mw
+
+ def on(self, message, namespace=None):
+ """Decorator to register a SocketIO event handler.
+
+ This decorator must be applied to SocketIO event handlers. Example::
+
+ @socketio.on('my event', namespace='/chat')
+ def handle_my_custom_event(json):
+ print('received json: ' + str(json))
+
+ :param message: The name of the event. This is normally a user defined
+ string, but a few event names are already defined. Use
+ ``'message'`` to define a handler that takes a string
+ payload, ``'json'`` to define a handler that takes a
+ JSON blob payload, ``'connect'`` or ``'disconnect'``
+ to create handlers for connection and disconnection
+ events.
+ :param namespace: The namespace on which the handler is to be
+ registered. Defaults to the global namespace.
+ """
+ namespace = namespace or '/'
+
+ def decorator(handler):
+ @wraps(handler)
+ def _handler(sid, *args):
+ return self._handle_event(handler, message, namespace, sid,
+ *args)
+
+ if self.server:
+ self.server.on(message, _handler, namespace=namespace)
+ else:
+ self.handlers.append((message, _handler, namespace))
+ return handler
+ return decorator
+
+ def on_error(self, namespace=None):
+ """Decorator to define a custom error handler for SocketIO events.
+
+ This decorator can be applied to a function that acts as an error
+ handler for a namespace. This handler will be invoked when a SocketIO
+ event handler raises an exception. The handler function must accept one
+ argument, which is the exception raised. Example::
+
+ @socketio.on_error(namespace='/chat')
+ def chat_error_handler(e):
+ print('An error has occurred: ' + str(e))
+
+ :param namespace: The namespace for which to register the error
+ handler. Defaults to the global namespace.
+ """
+ namespace = namespace or '/'
+
+ def decorator(exception_handler):
+ if not callable(exception_handler):
+ raise ValueError('exception_handler must be callable')
+ self.exception_handlers[namespace] = exception_handler
+ return exception_handler
+ return decorator
+
+ def on_error_default(self, exception_handler):
+ """Decorator to define a default error handler for SocketIO events.
+
+ This decorator can be applied to a function that acts as a default
+ error handler for any namespaces that do not have a specific handler.
+ Example::
+
+ @socketio.on_error_default
+ def error_handler(e):
+ print('An error has occurred: ' + str(e))
+ """
+ if not callable(exception_handler):
+ raise ValueError('exception_handler must be callable')
+ self.default_exception_handler = exception_handler
+ return exception_handler
+
+ def on_event(self, message, handler, namespace=None):
+ """Register a SocketIO event handler.
+
+ ``on_event`` is the non-decorator version of ``on()``.
+
+ Example::
+
+ def on_foo_event(json):
+ print('received json: ' + str(json))
+
+ socketio.on_event('my event', on_foo_event, namespace='/chat')
+
+ :param message: The name of the event. This is normally a user defined
+ string, but a few event names are already defined. Use
+ ``'message'`` to define a handler that takes a string
+ payload, ``'json'`` to define a handler that takes a
+ JSON blob payload, ``'connect'`` or ``'disconnect'``
+ to create handlers for connection and disconnection
+ events.
+ :param handler: The function that handles the event.
+ :param namespace: The namespace on which the handler is to be
+ registered. Defaults to the global namespace.
+ """
+ self.on(message, namespace=namespace)(handler)
+
+ def event(self, *args, **kwargs):
+ """Decorator to register an event handler.
+
+ This is a simplified version of the ``on()`` method that takes the
+ event name from the decorated function.
+
+ Example usage::
+
+ @socketio.event
+ def my_event(data):
+ print('Received data: ', data)
+
+ The above example is equivalent to::
+
+ @socketio.on('my_event')
+ def my_event(data):
+ print('Received data: ', data)
+
+ A custom namespace can be given as an argument to the decorator::
+
+ @socketio.event(namespace='/test')
+ def my_event(data):
+ print('Received data: ', data)
+ """
+ if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
+ # the decorator was invoked without arguments
+ # args[0] is the decorated function
+ return self.on(args[0].__name__)(args[0])
+ else:
+ # the decorator was invoked with arguments
+ def set_handler(handler):
+ return self.on(handler.__name__, *args, **kwargs)(handler)
+
+ return set_handler
+
+ def on_namespace(self, namespace_handler):
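+ """Register a class-based SocketIO event handler.
+
+ :param namespace_handler: an instance of a ``Namespace`` subclass
+ that handles events for its namespace.
+ """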
+ if not isinstance(namespace_handler, Namespace):
+ raise ValueError('Not a namespace instance.')
+ namespace_handler._set_socketio(self)
+ if self.server:
+ self.server.register_namespace(namespace_handler)
+ else:
+ self.namespace_handlers.append(namespace_handler)
+
+ def emit(self, event, *args, **kwargs):
+ """Emit a server generated SocketIO event.
+
+ This function emits a SocketIO event to one or more connected clients.
+ A JSON blob can be attached to the event as payload. This function can
+ be used outside of a SocketIO event context, so it is appropriate to
+ use when the server is the originator of an event, outside of any
+ client context, such as in a regular HTTP request handler or a
+ background task. Example::
+
+ @app.route('/ping')
+ def ping():
+ socketio.emit('ping event', {'data': 42}, namespace='/chat')
+
+ :param event: The name of the user event to emit.
+ :param args: A dictionary with the JSON data to send as payload.
+ :param namespace: The namespace under which the message is to be sent.
+ Defaults to the global namespace.
+ :param to: Send the message to all the users in the given room, or to
+ the user with the given session ID. If this parameter is not
+ included, the event is sent to all connected users.
+ :param include_self: ``True`` to include the sender when broadcasting
+ or addressing a room, or ``False`` to send to
+ everyone but the sender.
+ :param skip_sid: The session id of a client to ignore when broadcasting
+ or addressing a room. This is typically set to the
+ originator of the message, so that everyone except
+ that client receives the message. To skip multiple sids
+ pass a list.
+ :param callback: If given, this function will be called to acknowledge
+ that the client has received the message. The
+ arguments that will be passed to the function are
+ those provided by the client. Callback functions can
+ only be used when addressing an individual client.
+ """
+ namespace = kwargs.pop('namespace', '/')
+ to = kwargs.pop('to', None) or kwargs.pop('room', None)
+ include_self = kwargs.pop('include_self', True)
+ skip_sid = kwargs.pop('skip_sid', None)
+ if not include_self and not skip_sid:
+ skip_sid = flask.request.sid
+ callback = kwargs.pop('callback', None)
+ if callback:
+ # wrap the callback so that it sets the app and request contexts
+ sid = None
+ original_callback = callback
+ original_namespace = namespace
+ if has_request_context():
+ sid = getattr(flask.request, 'sid', None)
+ original_namespace = getattr(flask.request, 'namespace', None)
+
+ def _callback_wrapper(*args):
+ return self._handle_event(original_callback, None,
+ original_namespace, sid, *args)
+
+ if sid:
+ # the callback wrapper above will install a request context
+ # before invoking the original callback
+ # we only use it if the emit was issued from a Socket.IO
+ # populated request context (i.e. request.sid is defined)
+ callback = _callback_wrapper
+ self.server.emit(event, *args, namespace=namespace, to=to,
+ skip_sid=skip_sid, callback=callback, **kwargs)
+
+ def call(self, event, *args, **kwargs): # pragma: no cover
+ """Emit a SocketIO event and wait for the response.
+
+ This method issues an emit with a callback and waits for the callback
+ to be invoked by the client before returning. If the callback isn't
+ invoked before the timeout, then a TimeoutError exception is raised. If
+ the Socket.IO connection drops during the wait, this method still waits
+ until the specified timeout. Example::
+
+ def get_status(client, data):
+ status = call('status', {'data': data}, to=client)
+
+ :param event: The name of the user event to emit.
+ :param args: A dictionary with the JSON data to send as payload.
+ :param namespace: The namespace under which the message is to be sent.
+ Defaults to the global namespace.
+ :param to: The session ID of the recipient client.
+ :param timeout: The waiting timeout. If the timeout is reached before
+ the client acknowledges the event, then a
+ ``TimeoutError`` exception is raised. The default is 60
+ seconds.
+ :param ignore_queue: Only used when a message queue is configured. If
+ set to ``True``, the event is emitted to the
+ client directly, without going through the queue.
+ This is more efficient, but only works when a
+ single server process is used, or when there is a
+ single addressee. It is recommended to always
+ leave this parameter with its default value of
+ ``False``.
+ """
+ namespace = kwargs.pop('namespace', '/')
+ to = kwargs.pop('to', None) or kwargs.pop('room', None)
+ return self.server.call(event, *args, namespace=namespace, to=to,
+ **kwargs)
+
+ def send(self, data, json=False, namespace=None, to=None,
+ callback=None, include_self=True, skip_sid=None, **kwargs):
+ """Send a server-generated SocketIO message.
+
+ This function sends a simple SocketIO message to one or more connected
+ clients. The message can be a string or a JSON blob. This is a simpler
+ version of ``emit()``; in most cases ``emit()`` should be preferred. This
+ function can be used outside of a SocketIO event context, so it is
+ appropriate to use when the server is the originator of an event.
+
+ :param data: The message to send, either a string or a JSON blob.
+ :param json: ``True`` if ``message`` is a JSON blob, ``False``
+ otherwise.
+ :param namespace: The namespace under which the message is to be sent.
+ Defaults to the global namespace.
+ :param to: Send the message to all the users in the given room, or to
+ the user with the given session ID. If this parameter is not
+ included, the event is sent to all connected users.
+ :param include_self: ``True`` to include the sender when broadcasting
+ or addressing a room, or ``False`` to send to
+ everyone but the sender.
+ :param skip_sid: The session id of a client to ignore when broadcasting
+ or addressing a room. This is typically set to the
+ originator of the message, so that everyone except
+ that client receives the message. To skip multiple sids
+ pass a list.
+ :param callback: If given, this function will be called to acknowledge
+ that the client has received the message. The
+ arguments that will be passed to the function are
+ those provided by the client. Callback functions can
+ only be used when addressing an individual client.
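+
+ Example (the room name is illustrative)::
+
+     socketio.send('hello!', to='chat-room', namespace='/chat')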
+ """
+ skip_sid = flask.request.sid if not include_self else skip_sid
+ if json:
+ self.emit('json', data, namespace=namespace, to=to,
+ skip_sid=skip_sid, callback=callback, **kwargs)
+ else:
+ self.emit('message', data, namespace=namespace, to=to,
+ skip_sid=skip_sid, callback=callback, **kwargs)
+
+ def close_room(self, room, namespace=None):
+ """Close a room.
+
+ This function removes any users that are in the given room and then
+ deletes the room from the server. This function can be used outside
+ of a SocketIO event context.
+
+ :param room: The name of the room to close.
+ :param namespace: The namespace under which the room exists. Defaults
+ to the global namespace.
+ """
+ self.server.close_room(room, namespace)
+
+ def run(self, app, host=None, port=None, **kwargs): # pragma: no cover
+ """Run the SocketIO web server.
+
+ :param app: The Flask application instance.
+ :param host: The hostname or IP address for the server to listen on.
+ Defaults to 127.0.0.1.
+ :param port: The port number for the server to listen on. Defaults to
+ 5000.
+ :param debug: ``True`` to start the server in debug mode, ``False`` to
+ start in normal mode.
+ :param use_reloader: ``True`` to enable the Flask reloader, ``False``
+ to disable it.
+ :param reloader_options: A dictionary with options that are passed to
+ the Flask reloader, such as ``extra_files``,
+ ``reloader_type``, etc.
+ :param extra_files: A list of additional files that the Flask
+ reloader should watch. Defaults to ``None``.
+ Deprecated, use ``reloader_options`` instead.
+ :param log_output: If ``True``, the server logs all incoming
+ connections. If ``False`` logging is disabled.
+ Defaults to ``True`` in debug mode, ``False``
+ in normal mode. Unused when the threading async
+ mode is used.
+ :param allow_unsafe_werkzeug: Set to ``True`` to allow the use of the
+ Werkzeug web server in a production
+ setting. Default is ``False``. Set to
+ ``True`` at your own risk.
+ :param kwargs: Additional web server options. The web server options
+ are specific to the server used in each of the supported
+ async modes. Note that options provided here will
+ not be seen when using an external web server such
+ as gunicorn, since this method is not called in that
+ case.
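+
+ Example (host and port shown are illustrative)::
+
+     if __name__ == '__main__':
+         socketio.run(app, host='0.0.0.0', port=5000)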
+ """
+ if host is None:
+ host = '127.0.0.1'
+ if port is None:
+ server_name = app.config['SERVER_NAME']
+ if server_name and ':' in server_name:
+ port = int(server_name.rsplit(':', 1)[1])
+ else:
+ port = 5000
+
+ debug = kwargs.pop('debug', app.debug)
+ log_output = kwargs.pop('log_output', debug)
+ use_reloader = kwargs.pop('use_reloader', debug)
+ extra_files = kwargs.pop('extra_files', None)
+ reloader_options = kwargs.pop('reloader_options', {})
+ if extra_files:
+ reloader_options['extra_files'] = extra_files
+
+ app.debug = debug
+ if app.debug and self.server.eio.async_mode != 'threading':
+ # put the debug middleware between the SocketIO middleware
+ # and the Flask application instance
+ #
+ # mw1 mw2 mw3 Flask app
+ # o ---- o ---- o ---- o
+ # /
+ # o Flask-SocketIO
+ # \ middleware
+ # o
+ # Flask-SocketIO WebSocket handler
+ #
+ # BECOMES
+ #
+ # dbg-mw mw1 mw2 mw3 Flask app
+ # o ---- o ---- o ---- o ---- o
+ # /
+ # o Flask-SocketIO
+ # \ middleware
+ # o
+ # Flask-SocketIO WebSocket handler
+ #
+ self.sockio_mw.wsgi_app = DebuggedApplication(
+ self.sockio_mw.wsgi_app, evalex=True)
+
+ allow_unsafe_werkzeug = kwargs.pop('allow_unsafe_werkzeug', False)
+ if self.server.eio.async_mode == 'threading':
+ try:
+ import simple_websocket # noqa: F401
+ except ImportError:
+ from werkzeug._internal import _log
+ _log('warning', 'WebSocket transport not available. Install '
+ 'simple-websocket for improved performance.')
+ if not sys.stdin or not sys.stdin.isatty(): # pragma: no cover
+ if not allow_unsafe_werkzeug:
+ raise RuntimeError('The Werkzeug web server is not '
+ 'designed to run in production. Pass '
+ 'allow_unsafe_werkzeug=True to the '
+ 'run() method to disable this error.')
+ else:
+ from werkzeug._internal import _log
+ _log('warning', ('Werkzeug appears to be used in a '
+ 'production deployment. Consider '
+ 'switching to a production web server '
+ 'instead.'))
+ app.run(host=host, port=port, threaded=True,
+ use_reloader=use_reloader, **reloader_options, **kwargs)
+ elif self.server.eio.async_mode == 'eventlet':
+ def run_server():
+ import eventlet
+ import eventlet.wsgi
+ import eventlet.green
+ addresses = eventlet.green.socket.getaddrinfo(host, port)
+ if not addresses:
+ raise RuntimeError(
+ 'Could not resolve host to a valid address')
+ eventlet_socket = eventlet.listen(addresses[0][4],
+ addresses[0][0])
+
+ # If provided an SSL argument, use an SSL socket
+ ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
+ 'ssl_version', 'ca_certs',
+ 'do_handshake_on_connect', 'suppress_ragged_eofs',
+ 'ciphers']
+ ssl_params = {k: kwargs[k] for k in kwargs
+ if k in ssl_args and kwargs[k] is not None}
+ for k in ssl_args:
+ kwargs.pop(k, None)
+ if len(ssl_params) > 0:
+ ssl_params['server_side'] = True # Listening requires true
+ eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
+ **ssl_params)
+
+ eventlet.wsgi.server(eventlet_socket, app,
+ log_output=log_output, **kwargs)
+
+ if use_reloader:
+ run_with_reloader(run_server, **reloader_options)
+ else:
+ run_server()
+ elif self.server.eio.async_mode == 'gevent':
+ from gevent import pywsgi
+ try:
+ from geventwebsocket.handler import WebSocketHandler
+ websocket = True
+ except ImportError:
+ app.logger.warning(
+ 'WebSocket transport not available. Install '
+ 'gevent-websocket for improved performance.')
+ websocket = False
+
+ log = 'default'
+ if not log_output:
+ log = None
+ if websocket:
+ self.wsgi_server = pywsgi.WSGIServer(
+ (host, port), app, handler_class=WebSocketHandler,
+ log=log, **kwargs)
+ else:
+ self.wsgi_server = pywsgi.WSGIServer((host, port), app,
+ log=log, **kwargs)
+
+ if use_reloader:
+ # monkey patching is required by the reloader
+ from gevent import monkey
+ monkey.patch_thread()
+ monkey.patch_time()
+
+ def run_server():
+ self.wsgi_server.serve_forever()
+
+ run_with_reloader(run_server, **reloader_options)
+ else:
+ self.wsgi_server.serve_forever()
+
+ def stop(self):
+ """Stop a running SocketIO web server.
+
+ This method must be called from an HTTP or SocketIO handler function.
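+
+ Example (an illustrative sketch)::
+
+     @socketio.on('shutdown')
+     def shutdown():
+         socketio.stop()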
+ """
+ if self.server.eio.async_mode == 'threading':
+ func = flask.request.environ.get('werkzeug.server.shutdown')
+ if func:
+ func()
+ else:
+ raise RuntimeError('Cannot stop unknown web server')
+ elif self.server.eio.async_mode == 'eventlet':
+ raise SystemExit
+ elif self.server.eio.async_mode == 'gevent':
+ self.wsgi_server.stop()
+
+ def start_background_task(self, target, *args, **kwargs):
+ """Start a background task using the appropriate async model.
+
+ This is a utility function that applications can use to start a
+ background task using the method that is compatible with the
+ selected async mode.
+
+ :param target: the target function to execute.
+ :param args: arguments to pass to the function.
+ :param kwargs: keyword arguments to pass to the function.
+
+ This function returns an object that represents the background task,
+ on which the ``join()`` method can be invoked to wait for the task to
+ complete.
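+
+ Example (an illustrative sketch)::
+
+     def background_job():
+         while True:
+             socketio.sleep(10)
+             socketio.emit('heartbeat', {'beat': 1})
+
+     task = socketio.start_background_task(background_job)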
+ """
+ return self.server.start_background_task(target, *args, **kwargs)
+
+ def sleep(self, seconds=0):
+ """Sleep for the requested amount of time using the appropriate async
+ model.
+
+ This is a utility function that applications can use to put a task to
+ sleep without having to worry about using the correct call for the
+ selected async mode.
+ """
+ return self.server.sleep(seconds)
+
+ def test_client(self, app, namespace=None, query_string=None,
+ headers=None, auth=None, flask_test_client=None):
+ """The Socket.IO test client is useful for testing a Flask-SocketIO
+ server. It works in a similar way to the Flask Test Client, but
+ adapted to the Socket.IO server.
+
+ :param app: The Flask application instance.
+ :param namespace: The namespace for the client. If not provided, the
+ client connects to the server on the global
+ namespace.
+ :param query_string: A string with custom query string arguments.
+ :param headers: A dictionary with custom HTTP headers.
+ :param auth: Optional authentication data, given as a dictionary.
+ :param flask_test_client: The instance of the Flask test client
+ currently in use. Passing the Flask test
+ client is optional, but is necessary if you
+ want the Flask user session and any other
+ cookies set in HTTP routes accessible from
+ Socket.IO events.
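+
+ Example (an illustrative sketch)::
+
+     client = socketio.test_client(app)
+     client.emit('my event', {'data': 42})
+     received = client.get_received()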
+ """
+ return SocketIOTestClient(app, self, namespace=namespace,
+ query_string=query_string, headers=headers,
+ auth=auth,
+ flask_test_client=flask_test_client)
+
+ def _handle_event(self, handler, message, namespace, sid, *args):
+ environ = self.server.get_environ(sid, namespace=namespace)
+ if not environ:
+ # we don't have record of this client, ignore this event
+ return '', 400
+ app = environ['flask.app']
+ with app.request_context(environ):
+ if self.manage_session:
+ # manage a separate session for this client's Socket.IO events
+ # created as a copy of the regular user session
+ if 'saved_session' not in environ:
+ environ['saved_session'] = _ManagedSession(flask.session)
+ session_obj = environ['saved_session']
+ if hasattr(flask, 'globals') and \
+ hasattr(flask.globals, 'request_ctx'):
+ # update session for Flask >= 2.2
+ ctx = flask.globals.request_ctx._get_current_object()
+ else: # pragma: no cover
+ # update session for Flask < 2.2
+ ctx = flask._request_ctx_stack.top
+ ctx.session = session_obj
+ else:
+ # let Flask handle the user session
+ # for cookie based sessions, this effectively freezes the
+ # session to its state at connection time
+ # for server-side sessions, this allows HTTP and Socket.IO to
+ # share the session, with both having read/write access to it
+ session_obj = flask.session._get_current_object()
+ flask.request.sid = sid
+ flask.request.namespace = namespace
+ flask.request.event = {'message': message, 'args': args}
+ try:
+ if message == 'connect':
+ auth = args[1] if len(args) > 1 else None
+ try:
+ ret = handler(auth)
+ except TypeError:
+ ret = handler()
+ else:
+ ret = handler(*args)
+ except ConnectionRefusedError:
+ raise # let this error bubble up to python-socketio
+ except:
+ err_handler = self.exception_handlers.get(
+ namespace, self.default_exception_handler)
+ if err_handler is None:
+ raise
+ type, value, traceback = sys.exc_info()
+ return err_handler(value)
+ if not self.manage_session:
+ # when Flask is managing the user session, it needs to save it
+ if not hasattr(session_obj, 'modified') or \
+ session_obj.modified:
+ resp = app.response_class()
+ app.session_interface.save_session(app, session_obj, resp)
+ return ret
+
+
+def emit(event, *args, **kwargs):
+ """Emit a SocketIO event.
+
+ This function emits a SocketIO event to one or more connected clients. A
+ JSON blob can be attached to the event as payload. This is a function that
+ can only be called from a SocketIO event handler, as it obtains some
+ information from the current client context. Example::
+
+ @socketio.on('my event')
+ def handle_my_custom_event(json):
+ emit('my response', {'data': 42})
+
+ :param event: The name of the user event to emit.
+ :param args: A dictionary with the JSON data to send as payload.
+ :param namespace: The namespace under which the message is to be sent.
+ Defaults to the namespace used by the originating event.
+ A ``'/'`` can be used to explicitly specify the global
+ namespace.
+ :param callback: Callback function to invoke with the client's
+ acknowledgement.
+ :param broadcast: ``True`` to send the message to all clients, or ``False``
+ to only reply to the sender of the originating event.
+ :param to: Send the message to all the users in the given room, or to the
+ user with the given session ID. If this argument is not set and
+ ``broadcast`` is ``False``, then the message is sent only to the
+ originating user.
+ :param include_self: ``True`` to include the sender when broadcasting or
+ addressing a room, or ``False`` to send to everyone
+ but the sender.
+ :param skip_sid: The session id of a client to ignore when broadcasting
+ or addressing a room. This is typically set to the
+ originator of the message, so that everyone except
+ that client receives the message. To skip multiple sids
+ pass a list.
+ :param ignore_queue: Only used when a message queue is configured. If
+ set to ``True``, the event is emitted to the
+ clients directly, without going through the queue.
+ This is more efficient, but only works when a
+ single server process is used, or when there is a
+ single addressee. It is recommended to always leave
+ this parameter with its default value of ``False``.
+ """
+ if 'namespace' in kwargs:
+ namespace = kwargs['namespace']
+ else:
+ namespace = flask.request.namespace
+ callback = kwargs.get('callback')
+ broadcast = kwargs.get('broadcast')
+ to = kwargs.pop('to', None) or kwargs.pop('room', None)
+ if to is None and not broadcast:
+ to = flask.request.sid
+ include_self = kwargs.get('include_self', True)
+ skip_sid = kwargs.get('skip_sid')
+ ignore_queue = kwargs.get('ignore_queue', False)
+
+ socketio = flask.current_app.extensions['socketio']
+ return socketio.emit(event, *args, namespace=namespace, to=to,
+ include_self=include_self, skip_sid=skip_sid,
+ callback=callback, ignore_queue=ignore_queue)
+
+
+def call(event, *args, **kwargs): # pragma: no cover
+ """Emit a SocketIO event and wait for the response.
+
+ This function issues an emit with a callback and waits for the callback to
+ be invoked by the client before returning. If the callback isn't invoked
+ before the timeout, then a TimeoutError exception is raised. If the
+ Socket.IO connection drops during the wait, this method still waits until
+ the specified timeout. Example::
+
+ def get_status(client, data):
+ status = call('status', {'data': data}, to=client)
+
+ :param event: The name of the user event to emit.
+ :param args: A dictionary with the JSON data to send as payload.
+ :param namespace: The namespace under which the message is to be sent.
+ Defaults to the namespace used by the originating event.
+ A ``'/'`` can be used to explicitly specify the global
+ namespace.
+ :param to: The session ID of the recipient client. If this argument is not
+ given, the event is sent to the originating client.
+ :param timeout: The waiting timeout. If the timeout is reached before the
+ client acknowledges the event, then a ``TimeoutError``
+ exception is raised. The default is 60 seconds.
+ :param ignore_queue: Only used when a message queue is configured. If
+ set to ``True``, the event is emitted to the
+ client directly, without going through the queue.
+ This is more efficient, but only works when a
+ single server process is used, or when there is a
+ single addressee. It is recommended to always leave
+ this parameter with its default value of ``False``.
+ """
+ if 'namespace' in kwargs:
+ namespace = kwargs['namespace']
+ else:
+ namespace = flask.request.namespace
+ to = kwargs.pop('to', None) or kwargs.pop('room', None)
+ if to is None:
+ to = flask.request.sid
+ timeout = kwargs.get('timeout', 60)
+ ignore_queue = kwargs.get('ignore_queue', False)
+
+ socketio = flask.current_app.extensions['socketio']
+ return socketio.call(event, *args, namespace=namespace, to=to,
+ ignore_queue=ignore_queue, timeout=timeout)
+
+
+def send(message, **kwargs):
+ """Send a SocketIO message.
+
+ This function sends a simple SocketIO message to one or more connected
+ clients. The message can be a string or a JSON blob. This is a simpler
+ version of ``emit()``; in most cases ``emit()`` should be preferred. This
+ is a function that can only be called from a SocketIO event handler.
+
+ :param message: The message to send, either a string or a JSON blob.
+ :param json: ``True`` if ``message`` is a JSON blob, ``False``
+ otherwise.
+ :param namespace: The namespace under which the message is to be sent.
+ Defaults to the namespace used by the originating event.
+ An empty string can be used to select the global namespace.
+ :param callback: Callback function to invoke with the client's
+ acknowledgement.
+ :param broadcast: ``True`` to send the message to all connected clients, or
+ ``False`` to only reply to the sender of the originating
+ event.
+ :param to: Send the message to all the users in the given room, or to the
+ user with the given session ID. If this argument is not set and
+ ``broadcast`` is ``False``, then the message is sent only to the
+ originating user.
+ :param include_self: ``True`` to include the sender when broadcasting or
+ addressing a room, or ``False`` to send to everyone
+ but the sender.
+ :param skip_sid: The session id of a client to ignore when broadcasting
+ or addressing a room. This is typically set to the
+ originator of the message, so that everyone except
+ that client receives the message. To skip multiple sids
+ pass a list.
+ :param ignore_queue: Only used when a message queue is configured. If
+ set to ``True``, the event is emitted to the
+ clients directly, without going through the queue.
+ This is more efficient, but only works when a
+ single server process is used, or when there is a
+ single addressee. It is recommended to always leave
+ this parameter with its default value of ``False``.
+ """
+ json = kwargs.get('json', False)
+ if 'namespace' in kwargs:
+ namespace = kwargs['namespace']
+ else:
+ namespace = flask.request.namespace
+ callback = kwargs.get('callback')
+ broadcast = kwargs.get('broadcast')
+ to = kwargs.pop('to', None) or kwargs.pop('room', None)
+ if to is None and not broadcast:
+ to = flask.request.sid
+ include_self = kwargs.get('include_self', True)
+ skip_sid = kwargs.get('skip_sid')
+ ignore_queue = kwargs.get('ignore_queue', False)
+
+ socketio = flask.current_app.extensions['socketio']
+ return socketio.send(message, json=json, namespace=namespace, to=to,
+ include_self=include_self, skip_sid=skip_sid,
+ callback=callback, ignore_queue=ignore_queue)
+
+
+def join_room(room, sid=None, namespace=None):
+ """Join a room.
+
+ This function puts the user in a room, under the current namespace. The
+ user and the namespace are obtained from the event context. This is a
+ function that can only be called from a SocketIO event handler. Example::
+
+ @socketio.on('join')
+ def on_join(data):
+ username = session['username']
+ room = data['room']
+ join_room(room)
+ send(username + ' has entered the room.', to=room)
+
+ :param room: The name of the room to join.
+ :param sid: The session id of the client. If not provided, the client is
+ obtained from the request context.
+ :param namespace: The namespace for the room. If not provided, the
+ namespace is obtained from the request context.
+ """
+ socketio = flask.current_app.extensions['socketio']
+ sid = sid or flask.request.sid
+ namespace = namespace or flask.request.namespace
+ socketio.server.enter_room(sid, room, namespace=namespace)
+
+
+def leave_room(room, sid=None, namespace=None):
+ """Leave a room.
+
+ This function removes the user from a room, under the current namespace.
+ The user and the namespace are obtained from the event context. Example::
+
+ @socketio.on('leave')
+ def on_leave(data):
+ username = session['username']
+ room = data['room']
+ leave_room(room)
+ send(username + ' has left the room.', to=room)
+
+ :param room: The name of the room to leave.
+ :param sid: The session id of the client. If not provided, the client is
+ obtained from the request context.
+ :param namespace: The namespace for the room. If not provided, the
+ namespace is obtained from the request context.
+ """
+ socketio = flask.current_app.extensions['socketio']
+ sid = sid or flask.request.sid
+ namespace = namespace or flask.request.namespace
+ socketio.server.leave_room(sid, room, namespace=namespace)
+
+
+def close_room(room, namespace=None):
+ """Close a room.
+
+ This function removes any users that are in the given room and then deletes
+ the room from the server.
+
+ :param room: The name of the room to close.
+ :param namespace: The namespace for the room. If not provided, the
+ namespace is obtained from the request context.
+ """
+ socketio = flask.current_app.extensions['socketio']
+ namespace = namespace or flask.request.namespace
+ socketio.server.close_room(room, namespace=namespace)
+
+
+def rooms(sid=None, namespace=None):
+ """Return a list of the rooms the client is in.
+
+ This function returns all the rooms the client has entered, including its
+ own room, assigned by the Socket.IO server.
+
+ :param sid: The session id of the client. If not provided, the client is
+ obtained from the request context.
+ :param namespace: The namespace for the room. If not provided, the
+ namespace is obtained from the request context.
+ """
+ socketio = flask.current_app.extensions['socketio']
+ sid = sid or flask.request.sid
+ namespace = namespace or flask.request.namespace
+ return socketio.server.rooms(sid, namespace=namespace)
+
+
+def disconnect(sid=None, namespace=None, silent=False):
+ """Disconnect the client.
+
+ This function terminates the connection with the client. As a result of
+ this call the client will receive a disconnect event. Example::
+
+ @socketio.on('message')
+ def receive_message(msg):
+ if is_banned(session['username']):
+ disconnect()
+ else:
+ # ...
+
+ :param sid: The session id of the client. If not provided, the client is
+ obtained from the request context.
+ :param namespace: The namespace for the room. If not provided, the
+ namespace is obtained from the request context.
+ :param silent: this option is deprecated.
+ """
+ socketio = flask.current_app.extensions['socketio']
+ sid = sid or flask.request.sid
+ namespace = namespace or flask.request.namespace
+ return socketio.server.disconnect(sid, namespace=namespace)
diff --git a/.venv/Lib/site-packages/flask_socketio/__pycache__/__init__.cpython-312.pyc b/.venv/Lib/site-packages/flask_socketio/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..a46a5cb
Binary files /dev/null and b/.venv/Lib/site-packages/flask_socketio/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/flask_socketio/__pycache__/namespace.cpython-312.pyc b/.venv/Lib/site-packages/flask_socketio/__pycache__/namespace.cpython-312.pyc
new file mode 100644
index 0000000..f0fb754
Binary files /dev/null and b/.venv/Lib/site-packages/flask_socketio/__pycache__/namespace.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/flask_socketio/__pycache__/test_client.cpython-312.pyc b/.venv/Lib/site-packages/flask_socketio/__pycache__/test_client.cpython-312.pyc
new file mode 100644
index 0000000..86c32b0
Binary files /dev/null and b/.venv/Lib/site-packages/flask_socketio/__pycache__/test_client.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/flask_socketio/namespace.py b/.venv/Lib/site-packages/flask_socketio/namespace.py
new file mode 100644
index 0000000..43833a9
--- /dev/null
+++ b/.venv/Lib/site-packages/flask_socketio/namespace.py
@@ -0,0 +1,46 @@
+from socketio import Namespace as _Namespace
+
+
+class Namespace(_Namespace):
+ def __init__(self, namespace=None):
+ super(Namespace, self).__init__(namespace)
+ self.socketio = None
+
+ def _set_socketio(self, socketio):
+ self.socketio = socketio
+
+ def trigger_event(self, event, *args):
+ """Dispatch an event to the proper handler method.
+
+ In the most common usage, this method is not overloaded by subclasses,
+ as it performs the routing of events to methods. However, this
+ method can be overridden if special dispatching rules are needed, or if
+ having a single method that catches all events is desired.
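+
+ For example, an event named ``my_event`` is dispatched to a method
+ named ``on_my_event`` of the namespace instance (an illustrative
+ sketch)::
+
+     class ChatNamespace(Namespace):
+         def on_my_event(self, data):
+             print('received: ' + str(data))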
+ """
+ handler_name = 'on_' + event
+ if not hasattr(self, handler_name):
+ # there is no handler for this event, so we ignore it
+ return
+ handler = getattr(self, handler_name)
+ return self.socketio._handle_event(handler, event, self.namespace,
+ *args)
+
+ def emit(self, event, data=None, room=None, include_self=True,
+ namespace=None, callback=None):
+ """Emit a custom event to one or more connected clients."""
+ return self.socketio.emit(event, data, room=room,
+ include_self=include_self,
+ namespace=namespace or self.namespace,
+ callback=callback)
+
+ def send(self, data, room=None, include_self=True, namespace=None,
+ callback=None):
+ """Send a message to one or more connected clients."""
+ return self.socketio.send(data, room=room, include_self=include_self,
+ namespace=namespace or self.namespace,
+ callback=callback)
+
+ def close_room(self, room, namespace=None):
+ """Close a room."""
+ return self.socketio.close_room(room=room,
+ namespace=namespace or self.namespace)
diff --git a/.venv/Lib/site-packages/flask_socketio/test_client.py b/.venv/Lib/site-packages/flask_socketio/test_client.py
new file mode 100644
index 0000000..0603d77
--- /dev/null
+++ b/.venv/Lib/site-packages/flask_socketio/test_client.py
@@ -0,0 +1,236 @@
+import uuid
+
+from socketio import packet
+from socketio.pubsub_manager import PubSubManager
+from werkzeug.test import EnvironBuilder
+
+
+class SocketIOTestClient(object):
+ """
+ This class is useful for testing a Flask-SocketIO server. It works in a
+ similar way to the Flask Test Client, but adapted to the Socket.IO server.
+
+ :param app: The Flask application instance.
+ :param socketio: The application's ``SocketIO`` instance.
+ :param namespace: The namespace for the client. If not provided, the client
+ connects to the server on the global namespace.
+ :param query_string: A string with custom query string arguments.
+ :param headers: A dictionary with custom HTTP headers.
+ :param auth: Optional authentication data, given as a dictionary.
+ :param flask_test_client: The instance of the Flask test client
+ currently in use. Passing the Flask test
+ client is optional, but is necessary if you
+ want the Flask user session and any other
+ cookies set in HTTP routes accessible from
+ Socket.IO events.
+ """
+ clients = {}
+
+ def __init__(self, app, socketio, namespace=None, query_string=None,
+ headers=None, auth=None, flask_test_client=None):
+ def _mock_send_packet(eio_sid, pkt):
+ # make sure the packet can be encoded and decoded
+ epkt = pkt.encode()
+ if not isinstance(epkt, list):
+ pkt = packet.Packet(encoded_packet=epkt)
+ else:
+ pkt = packet.Packet(encoded_packet=epkt[0])
+ for att in epkt[1:]:
+ pkt.add_attachment(att)
+ client = self.clients.get(eio_sid)
+ if not client:
+ return
+ if pkt.packet_type == packet.EVENT or \
+ pkt.packet_type == packet.BINARY_EVENT:
+ if pkt.data[0] == 'message' or pkt.data[0] == 'json':
+ client.queue.append({
+ 'name': pkt.data[0],
+ 'args': pkt.data[1],
+ 'namespace': pkt.namespace or '/'})
+ else:
+ client.queue.append({
+ 'name': pkt.data[0],
+ 'args': pkt.data[1:],
+ 'namespace': pkt.namespace or '/'})
+ elif pkt.packet_type == packet.ACK or \
+ pkt.packet_type == packet.BINARY_ACK:
+ client.acks = {'args': pkt.data,
+ 'namespace': pkt.namespace or '/'}
+ elif pkt.packet_type in [packet.DISCONNECT, packet.CONNECT_ERROR]:
+ client.connected[pkt.namespace or '/'] = False
+
+ _current_packet = None
+
+ def _mock_send_eio_packet(eio_sid, eio_pkt):
+ nonlocal _current_packet
+ if _current_packet is not None:
+ _current_packet.add_attachment(eio_pkt.data)
+ if _current_packet.attachment_count == \
+ len(_current_packet.attachments):
+ _mock_send_packet(eio_sid, _current_packet)
+ _current_packet = None
+ else:
+ pkt = packet.Packet(encoded_packet=eio_pkt.data)
+ if pkt.attachment_count == 0:
+ _mock_send_packet(eio_sid, pkt)
+ else:
+ _current_packet = pkt
+
+ self.app = app
+ self.flask_test_client = flask_test_client
+ self.eio_sid = uuid.uuid4().hex
+ self.clients[self.eio_sid] = self
+ self.callback_counter = 0
+ self.socketio = socketio
+ self.connected = {}
+ self.queue = []
+ self.acks = None
+ socketio.server._send_packet = _mock_send_packet
+ socketio.server._send_eio_packet = _mock_send_eio_packet
+ socketio.server.environ[self.eio_sid] = {}
+ socketio.server.async_handlers = False # easier to test when
+ socketio.server.eio.async_handlers = False # events are sync
+ if isinstance(socketio.server.manager, PubSubManager):
+ raise RuntimeError('Test client cannot be used with a message '
+ 'queue. Disable the queue on your test '
+ 'configuration.')
+ socketio.server.manager.initialize()
+ self.connect(namespace=namespace, query_string=query_string,
+ headers=headers, auth=auth)
+
+ def is_connected(self, namespace=None):
+ """Check if a namespace is connected.
+
+ :param namespace: The namespace to check. The global namespace is
+ assumed if this argument is not provided.
+ """
+ return self.connected.get(namespace or '/', False)
+
+ def connect(self, namespace=None, query_string=None, headers=None,
+ auth=None):
+ """Connect the client.
+
+ :param namespace: The namespace for the client. If not provided, the
+ client connects to the server on the global
+ namespace.
+ :param query_string: A string with custom query string arguments.
+ :param headers: A dictionary with custom HTTP headers.
+ :param auth: Optional authentication data, given as a dictionary.
+
+ Note that it is usually not necessary to explicitly call this method,
+ since a connection is automatically established when an instance of
+ this class is created. An example where this method would be useful
+ is when the application accepts multiple namespace connections.
+ """
+ url = '/socket.io'
+ namespace = namespace or '/'
+ if query_string:
+ if query_string[0] != '?':
+ query_string = '?' + query_string
+ url += query_string
+ environ = EnvironBuilder(url, headers=headers).get_environ()
+ environ['flask.app'] = self.app
+ if self.flask_test_client:
+ # inject cookies from Flask
+ if hasattr(self.flask_test_client, '_add_cookies_to_wsgi'):
+ # flask >= 2.3
+ self.flask_test_client._add_cookies_to_wsgi(environ)
+ else: # pragma: no cover
+ # flask < 2.3
+ self.flask_test_client.cookie_jar.inject_wsgi(environ)
+ self.socketio.server._handle_eio_connect(self.eio_sid, environ)
+ pkt = packet.Packet(packet.CONNECT, auth, namespace=namespace)
+ self.socketio.server._handle_eio_message(self.eio_sid, pkt.encode())
+ sid = self.socketio.server.manager.sid_from_eio_sid(self.eio_sid,
+ namespace)
+ if sid:
+ self.connected[namespace] = True
+
+ def disconnect(self, namespace=None):
+ """Disconnect the client.
+
+ :param namespace: The namespace to disconnect. The global namespace is
+ assumed if this argument is not provided.
+ """
+ if not self.is_connected(namespace):
+ raise RuntimeError('not connected')
+ pkt = packet.Packet(packet.DISCONNECT, namespace=namespace)
+ self.socketio.server._handle_eio_message(self.eio_sid, pkt.encode())
+ del self.connected[namespace or '/']
+
+ def emit(self, event, *args, **kwargs):
+ """Emit an event to the server.
+
+ :param event: The event name.
+ :param *args: The event arguments.
+ :param callback: ``True`` if the client requests a callback, ``False``
+ if not. Note that client-side callbacks are not
+ implemented, a callback request will just tell the
+ server to provide the arguments to invoke the
+ callback, but no callback is invoked. Instead, the
+ arguments that the server provided for the callback
+ are returned by this function.
+ :param namespace: The namespace of the event. The global namespace is
+ assumed if this argument is not provided.
+ """
+ namespace = kwargs.pop('namespace', None)
+ if not self.is_connected(namespace):
+ raise RuntimeError('not connected')
+ callback = kwargs.pop('callback', False)
+ id = None
+ if callback:
+ self.callback_counter += 1
+ id = self.callback_counter
+ pkt = packet.Packet(packet.EVENT, data=[event] + list(args),
+ namespace=namespace, id=id)
+ encoded_pkt = pkt.encode()
+ if isinstance(encoded_pkt, list):
+ for epkt in encoded_pkt:
+ self.socketio.server._handle_eio_message(self.eio_sid, epkt)
+ else:
+ self.socketio.server._handle_eio_message(self.eio_sid, encoded_pkt)
+ if self.acks is not None:
+ ack = self.acks
+ self.acks = None
+ return ack['args'][0] if len(ack['args']) == 1 \
+ else ack['args']
+
+ def send(self, data, json=False, callback=False, namespace=None):
+ """Send a text or JSON message to the server.
+
+ :param data: A string, dictionary or list to send to the server.
+ :param json: ``True`` to send a JSON message, ``False`` to send a text
+ message.
+ :param callback: ``True`` if the client requests a callback, ``False``
+ if not. Note that client-side callbacks are not
+ implemented, a callback request will just tell the
+ server to provide the arguments to invoke the
+ callback, but no callback is invoked. Instead, the
+ arguments that the server provided for the callback
+ are returned by this function.
+ :param namespace: The namespace of the event. The global namespace is
+ assumed if this argument is not provided.
+ """
+ if json:
+ msg = 'json'
+ else:
+ msg = 'message'
+ return self.emit(msg, data, callback=callback, namespace=namespace)
+
+ def get_received(self, namespace=None):
+ """Return the list of messages received from the server.
+
+ Since this is not a real client, any time the server emits an event,
+ the event is simply stored. The test code can invoke this method to
+ obtain the list of events that were received since the last call.
+
+ :param namespace: The namespace to get events from. The global
+ namespace is assumed if this argument is not
+ provided.
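+
+ Each received event is a dictionary with ``name``, ``args`` and
+ ``namespace`` keys, so a test might check (event names are
+ illustrative)::
+
+     received = client.get_received('/chat')
+     assert received[0]['name'] == 'my response'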
+ """
+ if not self.is_connected(namespace):
+ raise RuntimeError('not connected')
+ namespace = namespace or '/'
+ r = [pkt for pkt in self.queue if pkt['namespace'] == namespace]
+ self.queue = [pkt for pkt in self.queue if pkt not in r]
+ return r
diff --git a/.venv/Lib/site-packages/h11-0.14.0.dist-info/INSTALLER b/.venv/Lib/site-packages/h11-0.14.0.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/.venv/Lib/site-packages/h11-0.14.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/.venv/Lib/site-packages/h11-0.14.0.dist-info/LICENSE.txt b/.venv/Lib/site-packages/h11-0.14.0.dist-info/LICENSE.txt
new file mode 100644
index 0000000..8f080ea
--- /dev/null
+++ b/.venv/Lib/site-packages/h11-0.14.0.dist-info/LICENSE.txt
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Nathaniel J. Smith and other contributors
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/.venv/Lib/site-packages/h11-0.14.0.dist-info/METADATA b/.venv/Lib/site-packages/h11-0.14.0.dist-info/METADATA
new file mode 100644
index 0000000..cf12a82
--- /dev/null
+++ b/.venv/Lib/site-packages/h11-0.14.0.dist-info/METADATA
@@ -0,0 +1,193 @@
+Metadata-Version: 2.1
+Name: h11
+Version: 0.14.0
+Summary: A pure-Python, bring-your-own-I/O implementation of HTTP/1.1
+Home-page: https://github.com/python-hyper/h11
+Author: Nathaniel J. Smith
+Author-email: njs@pobox.com
+License: MIT
+Classifier: Development Status :: 3 - Alpha
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Topic :: Internet :: WWW/HTTP
+Classifier: Topic :: System :: Networking
+Requires-Python: >=3.7
+License-File: LICENSE.txt
+Requires-Dist: typing-extensions ; python_version < "3.8"
+
+h11
+===
+
+.. image:: https://travis-ci.org/python-hyper/h11.svg?branch=master
+ :target: https://travis-ci.org/python-hyper/h11
+ :alt: Automated test status
+
+.. image:: https://codecov.io/gh/python-hyper/h11/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/python-hyper/h11
+ :alt: Test coverage
+
+.. image:: https://readthedocs.org/projects/h11/badge/?version=latest
+ :target: http://h11.readthedocs.io/en/latest/?badge=latest
+ :alt: Documentation Status
+
+This is a little HTTP/1.1 library written from scratch in Python,
+heavily inspired by hyper-h2.
+
+It's a "bring-your-own-I/O" library; h11 contains no IO code
+whatsoever. This means you can hook h11 up to your favorite network
+API, and that could be anything you want: synchronous, threaded,
+asynchronous, or your own implementation of `RFC 6214
+<https://tools.ietf.org/html/rfc6214>`_ -- h11 won't judge you.
+(Compare this to the current state of the art, where every time a new
+network API comes along, someone gets to start over reimplementing the
+entire HTTP protocol from scratch.) Cory Benfield wrote an excellent
+blog post describing the benefits of this approach, and gave a PyCon
+2016 talk on the same theme.
+
+This also means that h11 is not immediately useful out of the box:
+it's a toolkit for building programs that speak HTTP, not something
+that could directly replace ``requests`` or ``twisted.web`` or
+whatever. But h11 makes it much easier to implement something like
+``requests`` or ``twisted.web``.
+
+At a high level, working with h11 goes like this:
+
+1) First, create an ``h11.Connection`` object to track the state of a
+ single HTTP/1.1 connection.
+
+2) When you read data off the network, pass it to
+ ``conn.receive_data(...)``; you'll get back a list of objects
+ representing high-level HTTP "events".
+
+3) When you want to send a high-level HTTP event, create the
+ corresponding "event" object and pass it to ``conn.send(...)``;
+ this will give you back some bytes that you can then push out
+ through the network.
+
+For example, a client might instantiate and then send a
+``h11.Request`` object, then zero or more ``h11.Data`` objects for the
+request body (e.g., if this is a POST), and then a
+``h11.EndOfMessage`` to indicate the end of the message. The server
+would then send back a ``h11.Response``, some ``h11.Data``, and
+its own ``h11.EndOfMessage``. If either side violates the protocol,
+you'll get a ``h11.ProtocolError`` exception.
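+
+A minimal sketch of that client-side flow (illustrative only: the
+request target, host, and canned response bytes are placeholders, and
+real code would read the reply from a socket):
+
+.. code-block:: python
+
+    import h11
+
+    conn = h11.Connection(our_role=h11.CLIENT)
+    # send() returns bytes for the caller to push through its own I/O
+    data = conn.send(h11.Request(method="GET", target="/",
+                                 headers=[("Host", "example.com")]))
+    data += conn.send(h11.EndOfMessage())
+    # ... write `data` to a socket, read the reply into a buffer ...
+    conn.receive_data(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
+    while True:
+        event = conn.next_event()
+        print(event)  # h11.Response, then h11.EndOfMessage
+        if isinstance(event, h11.EndOfMessage):
+            break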
+
+h11 is suitable for implementing both servers and clients, and has a
+pleasantly symmetric API: the events you send as a client are exactly
+the ones that you receive as a server and vice-versa.
+
+There's an example of a tiny HTTP client in ``examples/basic-client.py``
+in the h11 repository.
+
+It also has `a fine manual <http://h11.readthedocs.io/en/latest/>`_.
+
+FAQ
+---
+
+*Whyyyyy?*
+
+I wanted to play with HTTP in Curio and Trio, which at the time didn't
+have any
+HTTP libraries. So I thought, no big deal, Python has, like, a dozen
+different implementations of HTTP, surely I can find one that's
+reusable. I didn't find one, but I did find Cory's call-to-arms
+blog-post. So I figured, well, fine, if I have to implement HTTP from
+scratch, at least I can make sure no-one *else* has to ever again.
+
+*Should I use it?*
+
+Maybe. You should be aware that it's a very young project. But, it's
+feature complete and has an exhaustive test-suite and complete docs,
+so the next step is for people to try using it and see how it goes
+:-). If you do then please let us know -- if nothing else we'll want
+to talk to you before making any incompatible changes!
+
+*What are the features/limitations?*
+
+Roughly speaking, it's trying to be a robust, complete, and non-hacky
+implementation of the first "chapter" of the HTTP/1.1 spec: `RFC 7230:
+HTTP/1.1 Message Syntax and Routing
+<https://tools.ietf.org/html/rfc7230>`_. That is, it mostly focuses on
+implementing HTTP at the level of taking bytes on and off the wire,
+and the headers related to that, and tries to be anal about spec
+conformance. It doesn't know about higher-level concerns like URL
+routing, conditional GETs, cross-origin cookie policies, or content
+negotiation. But it does know how to take care of framing,
+cross-version differences in keep-alive handling, and the "obsolete
+line folding" rule, so you can focus your energies on the hard /
+interesting parts for your application, and it tries to support the
+full specification in the sense that any useful HTTP/1.1 conformant
+application should be able to use h11.
+
+It's pure Python, and has no dependencies outside of the standard
+library.
+
+It has a test suite with 100.0% coverage for both statements and
+branches.
+
+Currently it supports Python 3 (testing on 3.7-3.10) and PyPy 3.
+The last Python 2-compatible version was h11 0.11.x.
+(Originally it had a Cython wrapper for http-parser and a beautiful
+nested state
+machine implemented with ``yield from`` to postprocess the output. But
+I had to take these out -- the new *parser* needs fewer lines-of-code
+than the old *parser wrapper*, is written in pure Python, uses no
+exotic language syntax, and has more features. It's sad, really; that
+old state machine was really slick. I just need a few sentences here
+to mourn that.)
+
+I don't know how fast it is. I haven't benchmarked or profiled it yet,
+so it's probably got a few pointless hot spots, and I've been trying
+to err on the side of simplicity and robustness instead of
+micro-optimization. But at the architectural level I tried hard to
+avoid fundamentally bad decisions, e.g., I believe that all the
+parsing algorithms remain linear-time even in the face of pathological
+input like slowloris, and there are no byte-by-byte loops. (I also
+believe that it maintains bounded memory usage in the face of
+arbitrary/pathological input.)
+
+The whole library is ~800 lines-of-code. You can read and understand
+the whole thing in less than an hour. Most of the energy invested in
+this so far has been spent on trying to keep things simple by
+minimizing special-cases and ad hoc state manipulation; even though it
+is now quite small and simple, I'm still annoyed that I haven't
+figured out how to make it even smaller and simpler. (Unfortunately,
+HTTP does not lend itself to simplicity.)
+
+The API is ~feature complete and I don't expect the general outlines
+to change much, but you can't judge an API's ergonomics until you
+actually document and use it, so I'd expect some changes in the
+details.
+
+*How do I try it?*
+
+.. code-block:: sh
+
+ $ pip install h11
+ $ git clone git@github.com:python-hyper/h11
+ $ cd h11/examples
+ $ python basic-client.py
+
+and go from there.
+
+*License?*
+
+MIT
+
+*Code of conduct?*
+
+Contributors are requested to follow our code of conduct in all
+project spaces.
diff --git a/.venv/Lib/site-packages/h11-0.14.0.dist-info/RECORD b/.venv/Lib/site-packages/h11-0.14.0.dist-info/RECORD
new file mode 100644
index 0000000..7fdd1f6
--- /dev/null
+++ b/.venv/Lib/site-packages/h11-0.14.0.dist-info/RECORD
@@ -0,0 +1,52 @@
+h11-0.14.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+h11-0.14.0.dist-info/LICENSE.txt,sha256=N9tbuFkm2yikJ6JYZ_ELEjIAOuob5pzLhRE4rbjm82E,1124
+h11-0.14.0.dist-info/METADATA,sha256=B7pZ0m7WBXNs17vl6hUH9bJTL9s37DaGvY31w7jNxSg,8175
+h11-0.14.0.dist-info/RECORD,,
+h11-0.14.0.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
+h11-0.14.0.dist-info/top_level.txt,sha256=F7dC4jl3zeh8TGHEPaWJrMbeuoWbS379Gwdi-Yvdcis,4
+h11/__init__.py,sha256=iO1KzkSO42yZ6ffg-VMgbx_ZVTWGUY00nRYEWn-s3kY,1507
+h11/__pycache__/__init__.cpython-312.pyc,,
+h11/__pycache__/_abnf.cpython-312.pyc,,
+h11/__pycache__/_connection.cpython-312.pyc,,
+h11/__pycache__/_events.cpython-312.pyc,,
+h11/__pycache__/_headers.cpython-312.pyc,,
+h11/__pycache__/_readers.cpython-312.pyc,,
+h11/__pycache__/_receivebuffer.cpython-312.pyc,,
+h11/__pycache__/_state.cpython-312.pyc,,
+h11/__pycache__/_util.cpython-312.pyc,,
+h11/__pycache__/_version.cpython-312.pyc,,
+h11/__pycache__/_writers.cpython-312.pyc,,
+h11/_abnf.py,sha256=ybixr0xsupnkA6GFAyMubuXF6Tc1lb_hF890NgCsfNc,4815
+h11/_connection.py,sha256=eS2sorMD0zKLCFiB9lW9W9F_Nzny2tjHa4e6s1ujr1c,26539
+h11/_events.py,sha256=LEfuvg1AbhHaVRwxCd0I-pFn9-ezUOaoL8o2Kvy1PBA,11816
+h11/_headers.py,sha256=RqB8cd8CN0blYPzcLe5qeCh-phv6D1U_CHj4hs67lgQ,10230
+h11/_readers.py,sha256=EbSed0jzwVUiD1nOPAeUcVE4Flf3wXkxfb8c06-OTBM,8383
+h11/_receivebuffer.py,sha256=xrspsdsNgWFxRfQcTXxR8RrdjRXXTK0Io5cQYWpJ1Ws,5252
+h11/_state.py,sha256=k1VL6SDbaPkSrZ-49ewCXDpuiUS69_46YhbWjuV1qEY,13300
+h11/_util.py,sha256=LWkkjXyJaFlAy6Lt39w73UStklFT5ovcvo0TkY7RYuk,4888
+h11/_version.py,sha256=LVyTdiZRzIIEv79UyOgbM5iUrJUllEzlCWaJEYBY1zc,686
+h11/_writers.py,sha256=oFKm6PtjeHfbj4RLX7VB7KDc1gIY53gXG3_HR9ltmTA,5081
+h11/py.typed,sha256=sow9soTwP9T_gEAQSVh7Gb8855h04Nwmhs2We-JRgZM,7
+h11/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+h11/tests/__pycache__/__init__.cpython-312.pyc,,
+h11/tests/__pycache__/helpers.cpython-312.pyc,,
+h11/tests/__pycache__/test_against_stdlib_http.cpython-312.pyc,,
+h11/tests/__pycache__/test_connection.cpython-312.pyc,,
+h11/tests/__pycache__/test_events.cpython-312.pyc,,
+h11/tests/__pycache__/test_headers.cpython-312.pyc,,
+h11/tests/__pycache__/test_helpers.cpython-312.pyc,,
+h11/tests/__pycache__/test_io.cpython-312.pyc,,
+h11/tests/__pycache__/test_receivebuffer.cpython-312.pyc,,
+h11/tests/__pycache__/test_state.cpython-312.pyc,,
+h11/tests/__pycache__/test_util.cpython-312.pyc,,
+h11/tests/data/test-file,sha256=ZJ03Rqs98oJw29OHzJg7LlMzyGQaRAY0r3AqBeM2wVU,65
+h11/tests/helpers.py,sha256=a1EVG_p7xU4wRsa3tMPTRxuaKCmretok9sxXWvqfmQA,3355
+h11/tests/test_against_stdlib_http.py,sha256=cojCHgHXFQ8gWhNlEEwl3trmOpN-5uDukRoHnElqo3A,3995
+h11/tests/test_connection.py,sha256=ZbPLDPclKvjgjAhgk-WlCPBaf17c4XUIV2tpaW08jOI,38720
+h11/tests/test_events.py,sha256=LPVLbcV-NvPNK9fW3rraR6Bdpz1hAlsWubMtNaJ5gHg,4657
+h11/tests/test_headers.py,sha256=qd8T1Zenuz5GbD6wklSJ5G8VS7trrYgMV0jT-SMvqg8,5612
+h11/tests/test_helpers.py,sha256=kAo0CEM4LGqmyyP2ZFmhsyq3UFJqoFfAbzu3hbWreRM,794
+h11/tests/test_io.py,sha256=uCZVnjarkRBkudfC1ij-KSCQ71XWJhnkgkgWWkKgYPQ,16386
+h11/tests/test_receivebuffer.py,sha256=3jGbeJM36Akqg_pAhPb7XzIn2NS6RhPg-Ryg8Eu6ytk,3454
+h11/tests/test_state.py,sha256=rqll9WqFsJPE0zSrtCn9LH659mPKsDeXZ-DwXwleuBQ,8928
+h11/tests/test_util.py,sha256=VO5L4nSFe4pgtSwKuv6u_6l0H7UeizF5WKuHTWreg70,2970
diff --git a/.venv/Lib/site-packages/h11-0.14.0.dist-info/WHEEL b/.venv/Lib/site-packages/h11-0.14.0.dist-info/WHEEL
new file mode 100644
index 0000000..5bad85f
--- /dev/null
+++ b/.venv/Lib/site-packages/h11-0.14.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/.venv/Lib/site-packages/h11-0.14.0.dist-info/top_level.txt b/.venv/Lib/site-packages/h11-0.14.0.dist-info/top_level.txt
new file mode 100644
index 0000000..0d24def
--- /dev/null
+++ b/.venv/Lib/site-packages/h11-0.14.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+h11
diff --git a/.venv/Lib/site-packages/h11/__init__.py b/.venv/Lib/site-packages/h11/__init__.py
new file mode 100644
index 0000000..989e92c
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/__init__.py
@@ -0,0 +1,62 @@
+# A highish-level implementation of the HTTP/1.1 wire protocol (RFC 7230),
+# containing no networking code at all, loosely modelled on hyper-h2's generic
+# implementation of HTTP/2 (and in particular the h2.connection.H2Connection
+# class). There's still a bunch of subtle details you need to get right if you
+# want to make this actually useful, because it doesn't implement all the
+# semantics to check that what you're asking to write to the wire is sensible,
+# but at least it gets you out of dealing with the wire itself.
+
+from h11._connection import Connection, NEED_DATA, PAUSED
+from h11._events import (
+ ConnectionClosed,
+ Data,
+ EndOfMessage,
+ Event,
+ InformationalResponse,
+ Request,
+ Response,
+)
+from h11._state import (
+ CLIENT,
+ CLOSED,
+ DONE,
+ ERROR,
+ IDLE,
+ MIGHT_SWITCH_PROTOCOL,
+ MUST_CLOSE,
+ SEND_BODY,
+ SEND_RESPONSE,
+ SERVER,
+ SWITCHED_PROTOCOL,
+)
+from h11._util import LocalProtocolError, ProtocolError, RemoteProtocolError
+from h11._version import __version__
+
+PRODUCT_ID = "python-h11/" + __version__
+
+
+__all__ = (
+ "Connection",
+ "NEED_DATA",
+ "PAUSED",
+ "ConnectionClosed",
+ "Data",
+ "EndOfMessage",
+ "Event",
+ "InformationalResponse",
+ "Request",
+ "Response",
+ "CLIENT",
+ "CLOSED",
+ "DONE",
+ "ERROR",
+ "IDLE",
+ "MUST_CLOSE",
+ "SEND_BODY",
+ "SEND_RESPONSE",
+ "SERVER",
+ "SWITCHED_PROTOCOL",
+ "ProtocolError",
+ "LocalProtocolError",
+ "RemoteProtocolError",
+)
diff --git a/.venv/Lib/site-packages/h11/__pycache__/__init__.cpython-312.pyc b/.venv/Lib/site-packages/h11/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..dc23d87
Binary files /dev/null and b/.venv/Lib/site-packages/h11/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/__pycache__/_abnf.cpython-312.pyc b/.venv/Lib/site-packages/h11/__pycache__/_abnf.cpython-312.pyc
new file mode 100644
index 0000000..f4f4b33
Binary files /dev/null and b/.venv/Lib/site-packages/h11/__pycache__/_abnf.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/__pycache__/_connection.cpython-312.pyc b/.venv/Lib/site-packages/h11/__pycache__/_connection.cpython-312.pyc
new file mode 100644
index 0000000..4c6f34b
Binary files /dev/null and b/.venv/Lib/site-packages/h11/__pycache__/_connection.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/__pycache__/_events.cpython-312.pyc b/.venv/Lib/site-packages/h11/__pycache__/_events.cpython-312.pyc
new file mode 100644
index 0000000..8c29177
Binary files /dev/null and b/.venv/Lib/site-packages/h11/__pycache__/_events.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/__pycache__/_headers.cpython-312.pyc b/.venv/Lib/site-packages/h11/__pycache__/_headers.cpython-312.pyc
new file mode 100644
index 0000000..f25e9ed
Binary files /dev/null and b/.venv/Lib/site-packages/h11/__pycache__/_headers.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/__pycache__/_readers.cpython-312.pyc b/.venv/Lib/site-packages/h11/__pycache__/_readers.cpython-312.pyc
new file mode 100644
index 0000000..0795454
Binary files /dev/null and b/.venv/Lib/site-packages/h11/__pycache__/_readers.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/__pycache__/_receivebuffer.cpython-312.pyc b/.venv/Lib/site-packages/h11/__pycache__/_receivebuffer.cpython-312.pyc
new file mode 100644
index 0000000..749bdaa
Binary files /dev/null and b/.venv/Lib/site-packages/h11/__pycache__/_receivebuffer.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/__pycache__/_state.cpython-312.pyc b/.venv/Lib/site-packages/h11/__pycache__/_state.cpython-312.pyc
new file mode 100644
index 0000000..3310327
Binary files /dev/null and b/.venv/Lib/site-packages/h11/__pycache__/_state.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/__pycache__/_util.cpython-312.pyc b/.venv/Lib/site-packages/h11/__pycache__/_util.cpython-312.pyc
new file mode 100644
index 0000000..c8e7d23
Binary files /dev/null and b/.venv/Lib/site-packages/h11/__pycache__/_util.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/__pycache__/_version.cpython-312.pyc b/.venv/Lib/site-packages/h11/__pycache__/_version.cpython-312.pyc
new file mode 100644
index 0000000..4329a29
Binary files /dev/null and b/.venv/Lib/site-packages/h11/__pycache__/_version.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/__pycache__/_writers.cpython-312.pyc b/.venv/Lib/site-packages/h11/__pycache__/_writers.cpython-312.pyc
new file mode 100644
index 0000000..d76796c
Binary files /dev/null and b/.venv/Lib/site-packages/h11/__pycache__/_writers.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/_abnf.py b/.venv/Lib/site-packages/h11/_abnf.py
new file mode 100644
index 0000000..933587f
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/_abnf.py
@@ -0,0 +1,132 @@
+# We use native strings for all the re patterns, to take advantage of string
+# formatting, and then convert to bytestrings when compiling the final re
+# objects.
+
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#whitespace
+# OWS = *( SP / HTAB )
+# ; optional whitespace
+OWS = r"[ \t]*"
+
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#rule.token.separators
+# token = 1*tchar
+#
+# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
+# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
+# / DIGIT / ALPHA
+# ; any VCHAR, except delimiters
+token = r"[-!#$%&'*+.^_`|~0-9a-zA-Z]+"
+
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#header.fields
+# field-name = token
+field_name = token
+
+# The standard says:
+#
+# field-value = *( field-content / obs-fold )
+# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+# field-vchar = VCHAR / obs-text
+# obs-fold = CRLF 1*( SP / HTAB )
+# ; obsolete line folding
+# ; see Section 3.2.4
+#
+# https://tools.ietf.org/html/rfc5234#appendix-B.1
+#
+# VCHAR = %x21-7E
+# ; visible (printing) characters
+#
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#rule.quoted-string
+# obs-text = %x80-FF
+#
+# However, the standard definition of field-content is WRONG! It disallows
+# fields containing a single visible character surrounded by whitespace,
+# e.g. "foo a bar".
+#
+# See: https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189
+#
+# So our definition of field_content attempts to fix it up...
+#
+# Also, we allow lots of control characters, because apparently people assume
+# that they're legal in practice (e.g., google analytics makes cookies with
+# \x01 in them!):
+# https://github.com/python-hyper/h11/issues/57
+# We still don't allow NUL or whitespace, because those are often treated as
+# meta-characters and letting them through can lead to nasty issues like SSRF.
+vchar = r"[\x21-\x7e]"
+vchar_or_obs_text = r"[^\x00\s]"
+field_vchar = vchar_or_obs_text
+field_content = r"{field_vchar}+(?:[ \t]+{field_vchar}+)*".format(**globals())
+
+# We handle obs-fold at a different level, and our fixed-up field_content
+# already grows to swallow the whole value, so ? instead of *
+field_value = r"({field_content})?".format(**globals())
+
+# header-field = field-name ":" OWS field-value OWS
+header_field = (
+    r"(?P<field_name>{field_name})"
+ r":"
+ r"{OWS}"
+    r"(?P<field_value>{field_value})"
+ r"{OWS}".format(**globals())
+)
+
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#request.line
+#
+# request-line = method SP request-target SP HTTP-version CRLF
+# method = token
+# HTTP-version = HTTP-name "/" DIGIT "." DIGIT
+# HTTP-name = %x48.54.54.50 ; "HTTP", case-sensitive
+#
+# request-target is complicated (see RFC 7230 sec 5.3) -- could be path, full
+# URL, host+port (for connect), or even "*", but in any case we are guaranteed
+# that it consists of the visible printing characters.
+method = token
+request_target = r"{vchar}+".format(**globals())
+http_version = r"HTTP/(?P<http_version>[0-9]\.[0-9])"
+request_line = (
+    r"(?P<method>{method})"
+ r" "
+    r"(?P<target>{request_target})"
+ r" "
+ r"{http_version}".format(**globals())
+)
+
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#status.line
+#
+# status-line = HTTP-version SP status-code SP reason-phrase CRLF
+# status-code = 3DIGIT
+# reason-phrase = *( HTAB / SP / VCHAR / obs-text )
+status_code = r"[0-9]{3}"
+reason_phrase = r"([ \t]|{vchar_or_obs_text})*".format(**globals())
+status_line = (
+ r"{http_version}"
+ r" "
+    r"(?P<status_code>{status_code})"
+ # However, there are apparently a few too many servers out there that just
+ # leave out the reason phrase:
+ # https://github.com/scrapy/scrapy/issues/345#issuecomment-281756036
+ # https://github.com/seanmonstar/httparse/issues/29
+ # so make it optional. ?: is a non-capturing group.
+    r"(?: (?P<reason>{reason_phrase}))?".format(**globals())
+)
+
+HEXDIG = r"[0-9A-Fa-f]"
+# Actually
+#
+# chunk-size = 1*HEXDIG
+#
+# but we impose an upper-limit to avoid ridiculosity. len(str(2**64)) == 20
+chunk_size = r"({HEXDIG}){{1,20}}".format(**globals())
+# Actually
+#
+# chunk-ext = *( ";" chunk-ext-name [ "=" chunk-ext-val ] )
+#
+# but we aren't parsing the things so we don't really care.
+chunk_ext = r";.*"
+chunk_header = (
+    r"(?P<chunk_size>{chunk_size})"
+    r"(?P<chunk_ext>{chunk_ext})?"
+ r"{OWS}\r\n".format(
+ **globals()
+    ) # Even though the specification does not allow for extra whitespace,
+    # we are lenient with trailing whitespace because some servers in the wild use it.
+)
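+
+# As an aside (illustrative only -- the library itself does not run this):
+# these patterns are compiled into bytes regexes by _readers.py and matched
+# against raw wire data, with the interesting pieces pulled out of the named
+# groups, e.g.:
+#
+#     >>> import re
+#     >>> chunk_header_re = re.compile(chunk_header.encode("ascii"))
+#     >>> m = chunk_header_re.match(b"1a;foo=bar\r\n")
+#     >>> m.group("chunk_size"), m.group("chunk_ext")
+#     (b'1a', b';foo=bar')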
diff --git a/.venv/Lib/site-packages/h11/_connection.py b/.venv/Lib/site-packages/h11/_connection.py
new file mode 100644
index 0000000..d175270
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/_connection.py
@@ -0,0 +1,633 @@
+# This contains the main Connection class. Everything in h11 revolves around
+# this.
+from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Type, Union
+
+from ._events import (
+ ConnectionClosed,
+ Data,
+ EndOfMessage,
+ Event,
+ InformationalResponse,
+ Request,
+ Response,
+)
+from ._headers import get_comma_header, has_expect_100_continue, set_comma_header
+from ._readers import READERS, ReadersType
+from ._receivebuffer import ReceiveBuffer
+from ._state import (
+ _SWITCH_CONNECT,
+ _SWITCH_UPGRADE,
+ CLIENT,
+ ConnectionState,
+ DONE,
+ ERROR,
+ MIGHT_SWITCH_PROTOCOL,
+ SEND_BODY,
+ SERVER,
+ SWITCHED_PROTOCOL,
+)
+from ._util import ( # Import the internal things we need
+ LocalProtocolError,
+ RemoteProtocolError,
+ Sentinel,
+)
+from ._writers import WRITERS, WritersType
+
+# Everything in __all__ gets re-exported as part of the h11 public API.
+__all__ = ["Connection", "NEED_DATA", "PAUSED"]
+
+
+class NEED_DATA(Sentinel, metaclass=Sentinel):
+ pass
+
+
+class PAUSED(Sentinel, metaclass=Sentinel):
+ pass
+
+
+# If we ever have this much buffered without it making a complete parseable
+# event, we error out. The only time we really buffer is when reading the
+# request/response line + headers together, so this is effectively the limit on
+# the size of that.
+#
+# Some precedents for defaults:
+# - node.js: 80 * 1024
+# - tomcat: 8 * 1024
+# - IIS: 16 * 1024
+# - Apache: <8 KiB per line>
+DEFAULT_MAX_INCOMPLETE_EVENT_SIZE = 16 * 1024
+
+# RFC 7230's rules for connection lifecycles:
+# - If either side says they want to close the connection, then the connection
+# must close.
+# - HTTP/1.1 defaults to keep-alive unless someone says Connection: close
+# - HTTP/1.0 defaults to close unless both sides say Connection: keep-alive
+# (and even this is a mess -- e.g. if you're implementing a proxy then
+# sending Connection: keep-alive is forbidden).
+#
+# We simplify life by simply not supporting keep-alive with HTTP/1.0 peers. So
+# our rule is:
+# - If someone says Connection: close, we will close
+# - If someone uses HTTP/1.0, we will close.
+def _keep_alive(event: Union[Request, Response]) -> bool:
+ connection = get_comma_header(event.headers, b"connection")
+ if b"close" in connection:
+ return False
+ if getattr(event, "http_version", b"1.1") < b"1.1":
+ return False
+ return True
+
+
+def _body_framing(
+ request_method: bytes, event: Union[Request, Response]
+) -> Tuple[str, Union[Tuple[()], Tuple[int]]]:
+ # Called when we enter SEND_BODY to figure out framing information for
+ # this body.
+ #
+ # These are the only two events that can trigger a SEND_BODY state:
+ assert type(event) in (Request, Response)
+ # Returns one of:
+ #
+ # ("content-length", count)
+ # ("chunked", ())
+ # ("http/1.0", ())
+ #
+ # which are (lookup key, *args) for constructing body reader/writer
+ # objects.
+ #
+ # Reference: https://tools.ietf.org/html/rfc7230#section-3.3.3
+ #
+ # Step 1: some responses always have an empty body, regardless of what the
+ # headers say.
+ if type(event) is Response:
+ if (
+ event.status_code in (204, 304)
+ or request_method == b"HEAD"
+ or (request_method == b"CONNECT" and 200 <= event.status_code < 300)
+ ):
+ return ("content-length", (0,))
+ # Section 3.3.3 also lists another case -- responses with status_code
+ # < 200. For us these are InformationalResponses, not Responses, so
+ # they can't get into this function in the first place.
+ assert event.status_code >= 200
+
+ # Step 2: check for Transfer-Encoding (T-E beats C-L):
+ transfer_encodings = get_comma_header(event.headers, b"transfer-encoding")
+ if transfer_encodings:
+ assert transfer_encodings == [b"chunked"]
+ return ("chunked", ())
+
+ # Step 3: check for Content-Length
+ content_lengths = get_comma_header(event.headers, b"content-length")
+ if content_lengths:
+ return ("content-length", (int(content_lengths[0]),))
+
+ # Step 4: no applicable headers; fallback/default depends on type
+ if type(event) is Request:
+ return ("content-length", (0,))
+ else:
+ return ("http/1.0", ())
+
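+# To make the framing rules above concrete (illustrative summary of the
+# branches in _body_framing):
+#
+#   response to a HEAD request        -> ("content-length", (0,))
+#   Transfer-Encoding: chunked        -> ("chunked", ())
+#   Content-Length: 42                -> ("content-length", (42,))
+#   request with no framing headers   -> ("content-length", (0,))
+#   response with no framing headers  -> ("http/1.0", ())
+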
+
+################################################################
+#
+# The main Connection class
+#
+################################################################
+
+
+class Connection:
+ """An object encapsulating the state of an HTTP connection.
+
+ Args:
+ our_role: If you're implementing a client, pass :data:`h11.CLIENT`. If
+ you're implementing a server, pass :data:`h11.SERVER`.
+
+ max_incomplete_event_size (int):
+ The maximum number of bytes we're willing to buffer of an
+ incomplete event. In practice this mostly sets a limit on the
+ maximum size of the request/response line + headers. If this is
+ exceeded, then :meth:`next_event` will raise
+ :exc:`RemoteProtocolError`.
+
+ """
+
+ def __init__(
+ self,
+ our_role: Type[Sentinel],
+ max_incomplete_event_size: int = DEFAULT_MAX_INCOMPLETE_EVENT_SIZE,
+ ) -> None:
+ self._max_incomplete_event_size = max_incomplete_event_size
+ # State and role tracking
+ if our_role not in (CLIENT, SERVER):
+ raise ValueError("expected CLIENT or SERVER, not {!r}".format(our_role))
+ self.our_role = our_role
+ self.their_role: Type[Sentinel]
+ if our_role is CLIENT:
+ self.their_role = SERVER
+ else:
+ self.their_role = CLIENT
+ self._cstate = ConnectionState()
+
+ # Callables for converting data->events or vice-versa given the
+ # current state
+ self._writer = self._get_io_object(self.our_role, None, WRITERS)
+ self._reader = self._get_io_object(self.their_role, None, READERS)
+
+ # Holds any unprocessed received data
+ self._receive_buffer = ReceiveBuffer()
+ # If this is true, then it indicates that the incoming connection was
+ # closed *after* the end of whatever's in self._receive_buffer:
+ self._receive_buffer_closed = False
+
+ # Extra bits of state that don't fit into the state machine.
+ #
+ # These two are only used to interpret framing headers for figuring
+ # out how to read/write response bodies. their_http_version is also
+ # made available as a convenient public API.
+ self.their_http_version: Optional[bytes] = None
+ self._request_method: Optional[bytes] = None
+ # This is pure flow-control and doesn't at all affect the set of legal
+ # transitions, so no need to bother ConnectionState with it:
+ self.client_is_waiting_for_100_continue = False
+
+ @property
+ def states(self) -> Dict[Type[Sentinel], Type[Sentinel]]:
+ """A dictionary like::
+
+           {CLIENT: <client state>, SERVER: <server state>}
+
+ See :ref:`state-machine` for details.
+
+ """
+ return dict(self._cstate.states)
+
+ @property
+ def our_state(self) -> Type[Sentinel]:
+ """The current state of whichever role we are playing. See
+ :ref:`state-machine` for details.
+ """
+ return self._cstate.states[self.our_role]
+
+ @property
+ def their_state(self) -> Type[Sentinel]:
+ """The current state of whichever role we are NOT playing. See
+ :ref:`state-machine` for details.
+ """
+ return self._cstate.states[self.their_role]
+
+ @property
+ def they_are_waiting_for_100_continue(self) -> bool:
+ return self.their_role is CLIENT and self.client_is_waiting_for_100_continue
+
+ def start_next_cycle(self) -> None:
+ """Attempt to reset our connection state for a new request/response
+ cycle.
+
+ If both client and server are in :data:`DONE` state, then resets them
+ both to :data:`IDLE` state in preparation for a new request/response
+ cycle on this same connection. Otherwise, raises a
+ :exc:`LocalProtocolError`.
+
+ See :ref:`keepalive-and-pipelining`.
+
+ """
+ old_states = dict(self._cstate.states)
+ self._cstate.start_next_cycle()
+ self._request_method = None
+ # self.their_http_version gets left alone, since it presumably lasts
+ # beyond a single request/response cycle
+ assert not self.client_is_waiting_for_100_continue
+ self._respond_to_state_changes(old_states)
+
+ def _process_error(self, role: Type[Sentinel]) -> None:
+ old_states = dict(self._cstate.states)
+ self._cstate.process_error(role)
+ self._respond_to_state_changes(old_states)
+
+ def _server_switch_event(self, event: Event) -> Optional[Type[Sentinel]]:
+ if type(event) is InformationalResponse and event.status_code == 101:
+ return _SWITCH_UPGRADE
+ if type(event) is Response:
+ if (
+ _SWITCH_CONNECT in self._cstate.pending_switch_proposals
+ and 200 <= event.status_code < 300
+ ):
+ return _SWITCH_CONNECT
+ return None
+
+ # All events go through here
+ def _process_event(self, role: Type[Sentinel], event: Event) -> None:
+ # First, pass the event through the state machine to make sure it
+ # succeeds.
+ old_states = dict(self._cstate.states)
+ if role is CLIENT and type(event) is Request:
+ if event.method == b"CONNECT":
+ self._cstate.process_client_switch_proposal(_SWITCH_CONNECT)
+ if get_comma_header(event.headers, b"upgrade"):
+ self._cstate.process_client_switch_proposal(_SWITCH_UPGRADE)
+ server_switch_event = None
+ if role is SERVER:
+ server_switch_event = self._server_switch_event(event)
+ self._cstate.process_event(role, type(event), server_switch_event)
+
+ # Then perform the updates triggered by it.
+
+ if type(event) is Request:
+ self._request_method = event.method
+
+ if role is self.their_role and type(event) in (
+ Request,
+ Response,
+ InformationalResponse,
+ ):
+ event = cast(Union[Request, Response, InformationalResponse], event)
+ self.their_http_version = event.http_version
+
+ # Keep alive handling
+ #
+ # RFC 7230 doesn't really say what one should do if Connection: close
+ # shows up on a 1xx InformationalResponse. I think the idea is that
+ # this is not supposed to happen. In any case, if it does happen, we
+ # ignore it.
+ if type(event) in (Request, Response) and not _keep_alive(
+ cast(Union[Request, Response], event)
+ ):
+ self._cstate.process_keep_alive_disabled()
+
+ # 100-continue
+ if type(event) is Request and has_expect_100_continue(event):
+ self.client_is_waiting_for_100_continue = True
+ if type(event) in (InformationalResponse, Response):
+ self.client_is_waiting_for_100_continue = False
+ if role is CLIENT and type(event) in (Data, EndOfMessage):
+ self.client_is_waiting_for_100_continue = False
+
+ self._respond_to_state_changes(old_states, event)
+
+ def _get_io_object(
+ self,
+ role: Type[Sentinel],
+ event: Optional[Event],
+ io_dict: Union[ReadersType, WritersType],
+ ) -> Optional[Callable[..., Any]]:
+ # event may be None; it's only used when entering SEND_BODY
+ state = self._cstate.states[role]
+ if state is SEND_BODY:
+ # Special case: the io_dict has a dict of reader/writer factories
+ # that depend on the request/response framing.
+ framing_type, args = _body_framing(
+ cast(bytes, self._request_method), cast(Union[Request, Response], event)
+ )
+ return io_dict[SEND_BODY][framing_type](*args) # type: ignore[index]
+ else:
+ # General case: the io_dict just has the appropriate reader/writer
+ # for this state
+ return io_dict.get((role, state)) # type: ignore[return-value]
+
+ # This must be called after any action that might have caused
+ # self._cstate.states to change.
+ def _respond_to_state_changes(
+ self,
+ old_states: Dict[Type[Sentinel], Type[Sentinel]],
+ event: Optional[Event] = None,
+ ) -> None:
+ # Update reader/writer
+ if self.our_state != old_states[self.our_role]:
+ self._writer = self._get_io_object(self.our_role, event, WRITERS)
+ if self.their_state != old_states[self.their_role]:
+ self._reader = self._get_io_object(self.their_role, event, READERS)
+
+ @property
+ def trailing_data(self) -> Tuple[bytes, bool]:
+ """Data that has been received, but not yet processed, represented as
+ a tuple with two elements, where the first is a byte-string containing
+ the unprocessed data itself, and the second is a bool that is True if
+ the receive connection was closed.
+
+ See :ref:`switching-protocols` for discussion of why you'd want this.
+ """
+ return (bytes(self._receive_buffer), self._receive_buffer_closed)
+
+ def receive_data(self, data: bytes) -> None:
+ """Add data to our internal receive buffer.
+
+ This does not actually do any processing on the data, just stores
+ it. To trigger processing, you have to call :meth:`next_event`.
+
+ Args:
+ data (:term:`bytes-like object`):
+ The new data that was just received.
+
+ Special case: If *data* is an empty byte-string like ``b""``,
+ then this indicates that the remote side has closed the
+ connection (end of file). Normally this is convenient, because
+ standard Python APIs like :meth:`file.read` or
+ :meth:`socket.recv` use ``b""`` to indicate end-of-file, while
+ other failures to read are indicated using other mechanisms
+ like raising :exc:`TimeoutError`. When using such an API you
+ can just blindly pass through whatever you get from ``read``
+ to :meth:`receive_data`, and everything will work.
+
+ But, if you have an API where reading an empty string is a
+ valid non-EOF condition, then you need to be aware of this and
+ make sure to check for such strings and avoid passing them to
+ :meth:`receive_data`.
+
+ Returns:
+ Nothing, but after calling this you should call :meth:`next_event`
+ to parse the newly received data.
+
+ Raises:
+ RuntimeError:
+ Raised if you pass an empty *data*, indicating EOF, and then
+ pass a non-empty *data*, indicating more data that somehow
+ arrived after the EOF.
+
+ (Calling ``receive_data(b"")`` multiple times is fine,
+ and equivalent to calling it once.)
+
+ """
+ if data:
+ if self._receive_buffer_closed:
+ raise RuntimeError("received close, then received more data?")
+ self._receive_buffer += data
+ else:
+ self._receive_buffer_closed = True
+
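+    # An illustrative sequence for the EOF rules above (sketch, not used
+    # anywhere in this module):
+    #
+    #     conn.receive_data(b"hello")  # buffers the data
+    #     conn.receive_data(b"")       # marks EOF; repeating it is harmless
+    #     conn.receive_data(b"x")      # RuntimeError: data arrived after close
+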
+ def _extract_next_receive_event(
+ self,
+ ) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]:
+ state = self.their_state
+ # We don't pause immediately when they enter DONE, because even in
+ # DONE state we can still process a ConnectionClosed() event. But
+ # if we have data in our buffer, then we definitely aren't getting
+ # a ConnectionClosed() immediately and we need to pause.
+ if state is DONE and self._receive_buffer:
+ return PAUSED
+ if state is MIGHT_SWITCH_PROTOCOL or state is SWITCHED_PROTOCOL:
+ return PAUSED
+ assert self._reader is not None
+ event = self._reader(self._receive_buffer)
+ if event is None:
+ if not self._receive_buffer and self._receive_buffer_closed:
+ # In some unusual cases (basically just HTTP/1.0 bodies), EOF
+ # triggers an actual protocol event; in that case, we want to
+ # return that event, and then the state will change and we'll
+ # get called again to generate the actual ConnectionClosed().
+ if hasattr(self._reader, "read_eof"):
+ event = self._reader.read_eof() # type: ignore[attr-defined]
+ else:
+ event = ConnectionClosed()
+ if event is None:
+ event = NEED_DATA
+ return event # type: ignore[no-any-return]
+
+ def next_event(self) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]:
+ """Parse the next event out of our receive buffer, update our internal
+ state, and return it.
+
+ This is a mutating operation -- think of it like calling :func:`next`
+ on an iterator.
+
+ Returns:
+ : One of three things:
+
+ 1) An event object -- see :ref:`events`.
+
+ 2) The special constant :data:`NEED_DATA`, which indicates that
+ you need to read more data from your socket and pass it to
+ :meth:`receive_data` before this method will be able to return
+ any more events.
+
+ 3) The special constant :data:`PAUSED`, which indicates that we
+ are not in a state where we can process incoming data (usually
+ because the peer has finished their part of the current
+ request/response cycle, and you have not yet called
+ :meth:`start_next_cycle`). See :ref:`flow-control` for details.
+
+ Raises:
+ RemoteProtocolError:
+ The peer has misbehaved. You should close the connection
+ (possibly after sending some kind of 4xx response).
+
+        Once this method returns :class:`ConnectionClosed`, all subsequent
+        calls will also return :class:`ConnectionClosed`.
+
+ If this method raises any exception besides :exc:`RemoteProtocolError`
+ then that's a bug -- if it happens please file a bug report!
+
+ If this method raises any exception then it also sets
+ :attr:`Connection.their_state` to :data:`ERROR` -- see
+ :ref:`error-handling` for discussion.
+
+ """
+
+ if self.their_state is ERROR:
+ raise RemoteProtocolError("Can't receive data when peer state is ERROR")
+ try:
+ event = self._extract_next_receive_event()
+ if event not in [NEED_DATA, PAUSED]:
+ self._process_event(self.their_role, cast(Event, event))
+ if event is NEED_DATA:
+ if len(self._receive_buffer) > self._max_incomplete_event_size:
+ # 431 is "Request header fields too large" which is pretty
+ # much the only situation where we can get here
+ raise RemoteProtocolError(
+ "Receive buffer too long", error_status_hint=431
+ )
+ if self._receive_buffer_closed:
+ # We're still trying to complete some event, but that's
+ # never going to happen because no more data is coming
+ raise RemoteProtocolError("peer unexpectedly closed connection")
+ return event
+ except BaseException as exc:
+ self._process_error(self.their_role)
+ if isinstance(exc, LocalProtocolError):
+ exc._reraise_as_remote_protocol_error()
+ else:
+ raise
+
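+    # An illustrative driver loop for next_event() (sketch; assumes a blocking
+    # socket named `sock`):
+    #
+    #     while True:
+    #         event = conn.next_event()
+    #         if event is NEED_DATA:
+    #             conn.receive_data(sock.recv(4096))
+    #         elif event is PAUSED:
+    #             ...  # finish our side, then conn.start_next_cycle()
+    #         else:
+    #             ...  # handle Request/Response/Data/EndOfMessage
+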
+ def send(self, event: Event) -> Optional[bytes]:
+ """Convert a high-level event into bytes that can be sent to the peer,
+ while updating our internal state machine.
+
+ Args:
+            event: The :ref:`event <events>` to send.
+
+ Returns:
+ If ``type(event) is ConnectionClosed``, then returns
+ ``None``. Otherwise, returns a :term:`bytes-like object`.
+
+ Raises:
+ LocalProtocolError:
+ Sending this event at this time would violate our
+ understanding of the HTTP/1.1 protocol.
+
+ If this method raises any exception then it also sets
+ :attr:`Connection.our_state` to :data:`ERROR` -- see
+ :ref:`error-handling` for discussion.
+
+ """
+ data_list = self.send_with_data_passthrough(event)
+ if data_list is None:
+ return None
+ else:
+ return b"".join(data_list)
+
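+    # For instance (illustrative): on a client connection,
+    #
+    #     conn.send(Request(method="GET", target="/",
+    #                       headers=[("Host", "example.com")]))
+    #
+    # returns wire bytes along the lines of
+    #
+    #     b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n"
+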
+ def send_with_data_passthrough(self, event: Event) -> Optional[List[bytes]]:
+ """Identical to :meth:`send`, except that in situations where
+ :meth:`send` returns a single :term:`bytes-like object`, this instead
+ returns a list of them -- and when sending a :class:`Data` event, this
+ list is guaranteed to contain the exact object you passed in as
+ :attr:`Data.data`. See :ref:`sendfile` for discussion.
+
+ """
+ if self.our_state is ERROR:
+ raise LocalProtocolError("Can't send data when our state is ERROR")
+ try:
+ if type(event) is Response:
+ event = self._clean_up_response_headers_for_sending(event)
+ # We want to call _process_event before calling the writer,
+ # because if someone tries to do something invalid then this will
+ # give a sensible error message, while our writers all just assume
+ # they will only receive valid events. But, _process_event might
+ # change self._writer. So we have to do a little dance:
+ writer = self._writer
+ self._process_event(self.our_role, event)
+ if type(event) is ConnectionClosed:
+ return None
+ else:
+ # In any situation where writer is None, process_event should
+ # have raised ProtocolError
+ assert writer is not None
+ data_list: List[bytes] = []
+ writer(event, data_list.append)
+ return data_list
+ except:
+ self._process_error(self.our_role)
+ raise
+
+ def send_failed(self) -> None:
+ """Notify the state machine that we failed to send the data it gave
+ us.
+
+ This causes :attr:`Connection.our_state` to immediately become
+ :data:`ERROR` -- see :ref:`error-handling` for discussion.
+
+ """
+ self._process_error(self.our_role)
+
+ # When sending a Response, we take responsibility for a few things:
+ #
+ # - Sometimes you MUST set Connection: close. We take care of those
+ # times. (You can also set it yourself if you want, and if you do then
+ # we'll respect that and close the connection at the right time. But you
+ # don't have to worry about that unless you want to.)
+ #
+ # - The user has to set Content-Length if they want it. Otherwise, for
+    #   responses that have bodies (e.g. not HEAD), we will automatically
+    #   select the right mechanism for streaming a body of unknown length,
+    #   which depends on the peer's HTTP version.
+ #
+ # This function's *only* responsibility is making sure headers are set up
+ # right -- everything downstream just looks at the headers. There are no
+ # side channels.
+ def _clean_up_response_headers_for_sending(self, response: Response) -> Response:
+ assert type(response) is Response
+
+ headers = response.headers
+ need_close = False
+
+ # HEAD requests need some special handling: they always act like they
+ # have Content-Length: 0, and that's how _body_framing treats
+ # them. But their headers are supposed to match what we would send if
+ # the request was a GET. (Technically there is one deviation allowed:
+ # we're allowed to leave out the framing headers -- see
+ # https://tools.ietf.org/html/rfc7231#section-4.3.2 . But it's just as
+ # easy to get them right.)
+ method_for_choosing_headers = cast(bytes, self._request_method)
+ if method_for_choosing_headers == b"HEAD":
+ method_for_choosing_headers = b"GET"
+ framing_type, _ = _body_framing(method_for_choosing_headers, response)
+ if framing_type in ("chunked", "http/1.0"):
+ # This response has a body of unknown length.
+ # If our peer is HTTP/1.1, we use Transfer-Encoding: chunked
+ # If our peer is HTTP/1.0, we use no framing headers, and close the
+ # connection afterwards.
+ #
+ # Make sure to clear Content-Length (in principle user could have
+ # set both and then we ignored Content-Length b/c
+ # Transfer-Encoding overwrote it -- this would be naughty of them,
+ # but the HTTP spec says that if our peer does this then we have
+ # to fix it instead of erroring out, so we'll accord the user the
+ # same respect).
+ headers = set_comma_header(headers, b"content-length", [])
+ if self.their_http_version is None or self.their_http_version < b"1.1":
+ # Either we never got a valid request and are sending back an
+ # error (their_http_version is None), so we assume the worst;
+ # or else we did get a valid HTTP/1.0 request, so we know that
+ # they don't understand chunked encoding.
+ headers = set_comma_header(headers, b"transfer-encoding", [])
+ # This is actually redundant ATM, since currently we
+ # unconditionally disable keep-alive when talking to HTTP/1.0
+ # peers. But let's be defensive just in case we add
+ # Connection: keep-alive support later:
+ if self._request_method != b"HEAD":
+ need_close = True
+ else:
+ headers = set_comma_header(headers, b"transfer-encoding", [b"chunked"])
+
+ if not self._cstate.keep_alive or need_close:
+ # Make sure Connection: close is set
+ connection = set(get_comma_header(headers, b"connection"))
+ connection.discard(b"keep-alive")
+ connection.add(b"close")
+ headers = set_comma_header(headers, b"connection", sorted(connection))
+
+ return Response(
+ headers=headers,
+ status_code=response.status_code,
+ http_version=response.http_version,
+ reason=response.reason,
+ )
diff --git a/.venv/Lib/site-packages/h11/_events.py b/.venv/Lib/site-packages/h11/_events.py
new file mode 100644
index 0000000..075bf8a
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/_events.py
@@ -0,0 +1,369 @@
+# High level events that make up HTTP/1.1 conversations. Loosely inspired by
+# the corresponding events in hyper-h2:
+#
+# http://python-hyper.org/h2/en/stable/api.html#events
+#
+# Don't subclass these. Stuff will break.
+
+import re
+from abc import ABC
+from dataclasses import dataclass, field
+from typing import Any, cast, Dict, List, Tuple, Union
+
+from ._abnf import method, request_target
+from ._headers import Headers, normalize_and_validate
+from ._util import bytesify, LocalProtocolError, validate
+
+# Everything in __all__ gets re-exported as part of the h11 public API.
+__all__ = [
+ "Event",
+ "Request",
+ "InformationalResponse",
+ "Response",
+ "Data",
+ "EndOfMessage",
+ "ConnectionClosed",
+]
+
+method_re = re.compile(method.encode("ascii"))
+request_target_re = re.compile(request_target.encode("ascii"))
+
+
+class Event(ABC):
+ """
+ Base class for h11 events.
+ """
+
+ __slots__ = ()
+
+
+@dataclass(init=False, frozen=True)
+class Request(Event):
+ """The beginning of an HTTP request.
+
+ Fields:
+
+ .. attribute:: method
+
+ An HTTP method, e.g. ``b"GET"`` or ``b"POST"``. Always a byte
+ string. :term:`Bytes-like objects ` and native
+ strings containing only ascii characters will be automatically
+ converted to byte strings.
+
+ .. attribute:: target
+
+ The target of an HTTP request, e.g. ``b"/index.html"``, or one of the
+       more exotic formats described in `RFC 7230, section 5.3
+       <https://tools.ietf.org/html/rfc7230#section-5.3>`_. Always a byte
+ string. :term:`Bytes-like objects ` and native
+ strings containing only ascii characters will be automatically
+ converted to byte strings.
+
+ .. attribute:: headers
+
+ Request headers, represented as a list of (name, value) pairs. See
+ :ref:`the header normalization rules ` for details.
+
+ .. attribute:: http_version
+
+ The HTTP protocol version, represented as a byte string like
+ ``b"1.1"``. See :ref:`the HTTP version normalization rules
+ ` for details.
+
+ """
+
+ __slots__ = ("method", "headers", "target", "http_version")
+
+ method: bytes
+ headers: Headers
+ target: bytes
+ http_version: bytes
+
+ def __init__(
+ self,
+ *,
+ method: Union[bytes, str],
+ headers: Union[Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]]],
+ target: Union[bytes, str],
+ http_version: Union[bytes, str] = b"1.1",
+ _parsed: bool = False,
+ ) -> None:
+ super().__init__()
+ if isinstance(headers, Headers):
+ object.__setattr__(self, "headers", headers)
+ else:
+ object.__setattr__(
+ self, "headers", normalize_and_validate(headers, _parsed=_parsed)
+ )
+ if not _parsed:
+ object.__setattr__(self, "method", bytesify(method))
+ object.__setattr__(self, "target", bytesify(target))
+ object.__setattr__(self, "http_version", bytesify(http_version))
+ else:
+ object.__setattr__(self, "method", method)
+ object.__setattr__(self, "target", target)
+ object.__setattr__(self, "http_version", http_version)
+
+ # "A server MUST respond with a 400 (Bad Request) status code to any
+ # HTTP/1.1 request message that lacks a Host header field and to any
+ # request message that contains more than one Host header field or a
+ # Host header field with an invalid field-value."
+ # -- https://tools.ietf.org/html/rfc7230#section-5.4
+ host_count = 0
+ for name, value in self.headers:
+ if name == b"host":
+ host_count += 1
+ if self.http_version == b"1.1" and host_count == 0:
+ raise LocalProtocolError("Missing mandatory Host: header")
+ if host_count > 1:
+ raise LocalProtocolError("Found multiple Host: headers")
+
+ validate(method_re, self.method, "Illegal method characters")
+ validate(request_target_re, self.target, "Illegal target characters")
+
+ # This is an unhashable type.
+ __hash__ = None # type: ignore
+
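+# A quick illustration of the bytesification and validation above (sketch,
+# not executed by the library):
+#
+#     >>> req = Request(method="GET", target="/",
+#     ...               headers=[("Host", "example.com")])
+#     >>> req.method, req.http_version
+#     (b'GET', b'1.1')
+#     >>> Request(method="GET", target="/", headers=[])
+#     Traceback (most recent call last):
+#         ...
+#     h11._util.LocalProtocolError: Missing mandatory Host: header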
+
+@dataclass(init=False, frozen=True)
+class _ResponseBase(Event):
+ __slots__ = ("headers", "http_version", "reason", "status_code")
+
+ headers: Headers
+ http_version: bytes
+ reason: bytes
+ status_code: int
+
+ def __init__(
+ self,
+ *,
+ headers: Union[Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]]],
+ status_code: int,
+ http_version: Union[bytes, str] = b"1.1",
+ reason: Union[bytes, str] = b"",
+ _parsed: bool = False,
+ ) -> None:
+ super().__init__()
+ if isinstance(headers, Headers):
+ object.__setattr__(self, "headers", headers)
+ else:
+ object.__setattr__(
+ self, "headers", normalize_and_validate(headers, _parsed=_parsed)
+ )
+ if not _parsed:
+ object.__setattr__(self, "reason", bytesify(reason))
+ object.__setattr__(self, "http_version", bytesify(http_version))
+ if not isinstance(status_code, int):
+ raise LocalProtocolError("status code must be integer")
+ # Because IntEnum objects are instances of int, but aren't
+ # duck-compatible (sigh), see gh-72.
+ object.__setattr__(self, "status_code", int(status_code))
+ else:
+ object.__setattr__(self, "reason", reason)
+ object.__setattr__(self, "http_version", http_version)
+ object.__setattr__(self, "status_code", status_code)
+
+ self.__post_init__()
+
+ def __post_init__(self) -> None:
+ pass
+
+ # This is an unhashable type.
+ __hash__ = None # type: ignore
+
+
+@dataclass(init=False, frozen=True)
+class InformationalResponse(_ResponseBase):
+ """An HTTP informational response.
+
+ Fields:
+
+ .. attribute:: status_code
+
+ The status code of this response, as an integer. For an
+ :class:`InformationalResponse`, this is always in the range [100,
+ 200).
+
+ .. attribute:: headers
+
+ Request headers, represented as a list of (name, value) pairs. See
+ :ref:`the header normalization rules ` for
+ details.
+
+ .. attribute:: http_version
+
+ The HTTP protocol version, represented as a byte string like
+ ``b"1.1"``. See :ref:`the HTTP version normalization rules
+ ` for details.
+
+ .. attribute:: reason
+
+ The reason phrase of this response, as a byte string. For example:
+ ``b"OK"``, or ``b"Not Found"``.
+
+ """
+
+ def __post_init__(self) -> None:
+ if not (100 <= self.status_code < 200):
+ raise LocalProtocolError(
+ "InformationalResponse status_code should be in range "
+ "[100, 200), not {}".format(self.status_code)
+ )
+
+ # This is an unhashable type.
+ __hash__ = None # type: ignore
+
+
+@dataclass(init=False, frozen=True)
+class Response(_ResponseBase):
+ """The beginning of an HTTP response.
+
+ Fields:
+
+ .. attribute:: status_code
+
+       The status code of this response, as an integer. For a
+ :class:`Response`, this is always in the range [200,
+ 1000).
+
+ .. attribute:: headers
+
+ Request headers, represented as a list of (name, value) pairs. See
+ :ref:`the header normalization rules ` for details.
+
+ .. attribute:: http_version
+
+ The HTTP protocol version, represented as a byte string like
+ ``b"1.1"``. See :ref:`the HTTP version normalization rules
+ ` for details.
+
+ .. attribute:: reason
+
+ The reason phrase of this response, as a byte string. For example:
+ ``b"OK"``, or ``b"Not Found"``.
+
+ """
+
+ def __post_init__(self) -> None:
+ if not (200 <= self.status_code < 1000):
+ raise LocalProtocolError(
+ "Response status_code should be in range [200, 1000), not {}".format(
+ self.status_code
+ )
+ )
+
+ # This is an unhashable type.
+ __hash__ = None # type: ignore
+
+
+@dataclass(init=False, frozen=True)
+class Data(Event):
+ """Part of an HTTP message body.
+
+ Fields:
+
+ .. attribute:: data
+
+ A :term:`bytes-like object` containing part of a message body. Or, if
+ using the ``combine=False`` argument to :meth:`Connection.send`, then
+ any object that your socket writing code knows what to do with, and for
+ which calling :func:`len` returns the number of bytes that will be
+ written -- see :ref:`sendfile` for details.
+
+ .. attribute:: chunk_start
+
+ A marker that indicates whether this data object is from the start of a
+       chunked transfer encoding chunk. This field is ignored when a Data
+ event is provided to :meth:`Connection.send`: it is only valid on
+ events emitted from :meth:`Connection.next_event`. You probably
+ shouldn't use this attribute at all; see
+ :ref:`chunk-delimiters-are-bad` for details.
+
+ .. attribute:: chunk_end
+
+ A marker that indicates whether this data object is the last for a
+       given chunked transfer encoding chunk. This field is ignored when
+ a Data event is provided to :meth:`Connection.send`: it is only valid
+ on events emitted from :meth:`Connection.next_event`. You probably
+ shouldn't use this attribute at all; see
+ :ref:`chunk-delimiters-are-bad` for details.
+
+ """
+
+ __slots__ = ("data", "chunk_start", "chunk_end")
+
+ data: bytes
+ chunk_start: bool
+ chunk_end: bool
+
+ def __init__(
+ self, data: bytes, chunk_start: bool = False, chunk_end: bool = False
+ ) -> None:
+ object.__setattr__(self, "data", data)
+ object.__setattr__(self, "chunk_start", chunk_start)
+ object.__setattr__(self, "chunk_end", chunk_end)
+
+ # This is an unhashable type.
+ __hash__ = None # type: ignore
+
+
+# XX FIXME: "A recipient MUST ignore (or consider as an error) any fields that
+# are forbidden to be sent in a trailer, since processing them as if they were
+# present in the header section might bypass external security filters."
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#chunked.trailer.part
+# Unfortunately, the list of forbidden fields is long and vague :-/
+@dataclass(init=False, frozen=True)
+class EndOfMessage(Event):
+ """The end of an HTTP message.
+
+ Fields:
+
+ .. attribute:: headers
+
+ Default value: ``[]``
+
+ Any trailing headers attached to this message, represented as a list of
+ (name, value) pairs. See :ref:`the header normalization rules
+ ` for details.
+
+ Must be empty unless ``Transfer-Encoding: chunked`` is in use.
+
+ """
+
+ __slots__ = ("headers",)
+
+ headers: Headers
+
+ def __init__(
+ self,
+ *,
+ headers: Union[
+ Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]], None
+ ] = None,
+ _parsed: bool = False,
+ ) -> None:
+ super().__init__()
+ if headers is None:
+ headers = Headers([])
+ elif not isinstance(headers, Headers):
+ headers = normalize_and_validate(headers, _parsed=_parsed)
+
+ object.__setattr__(self, "headers", headers)
+
+ # This is an unhashable type.
+ __hash__ = None # type: ignore
+
+
+@dataclass(frozen=True)
+class ConnectionClosed(Event):
+ """This event indicates that the sender has closed their outgoing
+ connection.
+
+ Note that this does not necessarily mean that they can't *receive* further
+    data, because TCP connections are composed of two one-way channels which
+ can be closed independently. See :ref:`closing` for details.
+
+ No fields.
+ """
+
+ pass
diff --git a/.venv/Lib/site-packages/h11/_headers.py b/.venv/Lib/site-packages/h11/_headers.py
new file mode 100644
index 0000000..b97d020
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/_headers.py
@@ -0,0 +1,278 @@
+import re
+from typing import AnyStr, cast, List, overload, Sequence, Tuple, TYPE_CHECKING, Union
+
+from ._abnf import field_name, field_value
+from ._util import bytesify, LocalProtocolError, validate
+
+if TYPE_CHECKING:
+ from ._events import Request
+
+try:
+ from typing import Literal
+except ImportError:
+ from typing_extensions import Literal # type: ignore
+
+
+# Facts
+# -----
+#
+# Headers are:
+# keys: case-insensitive ascii
+# values: mixture of ascii and raw bytes
+#
+# "Historically, HTTP has allowed field content with text in the ISO-8859-1
+# charset [ISO-8859-1], supporting other charsets only through use of
+# [RFC2047] encoding. In practice, most HTTP header field values use only a
+# subset of the US-ASCII charset [USASCII]. Newly defined header fields SHOULD
+# limit their field values to US-ASCII octets. A recipient SHOULD treat other
+# octets in field content (obs-text) as opaque data."
+# And it deprecates all non-ascii values
+#
+# Leading/trailing whitespace in header names is forbidden
+#
+# Values get leading/trailing whitespace stripped
+#
+# Content-Disposition actually needs to contain unicode semantically; to
+# accomplish this it has a terrifically weird way of encoding the filename
+# itself as ascii (and even this still has lots of cross-browser
+# incompatibilities)
+#
+# Order is important:
+# "a proxy MUST NOT change the order of these field values when forwarding a
+# message"
+# (and there are several headers where the order indicates a preference)
+#
+# Multiple occurrences of the same header:
+# "A sender MUST NOT generate multiple header fields with the same field name
+# in a message unless either the entire field value for that header field is
+# defined as a comma-separated list [or the header is Set-Cookie which gets a
+# special exception]" - RFC 7230. (cookies are in RFC 6265)
+#
+# So every header aside from Set-Cookie can be merged by b", ".join if it
+# occurs repeatedly. But, of course, they can't necessarily be split by
+# .split(b","), because quoting.
+#
+# Given all this mess (case insensitive, duplicates allowed, order is
+# important, ...), there doesn't appear to be any standard way to handle
+# headers in Python -- they're almost like dicts, but... actually just
+# aren't. For now we punt and just use a super simple representation: headers
+# are a list of pairs
+#
+# [(name1, value1), (name2, value2), ...]
+#
+# where all entries are bytestrings, names are lowercase and have no
+# leading/trailing whitespace, and values are bytestrings with no
+# leading/trailing whitespace. Searching and updating are done via naive O(n)
+# methods.
+#
+# Maybe a dict-of-lists would be better?
+
+_content_length_re = re.compile(rb"[0-9]+")
+_field_name_re = re.compile(field_name.encode("ascii"))
+_field_value_re = re.compile(field_value.encode("ascii"))
+
+
+class Headers(Sequence[Tuple[bytes, bytes]]):
+ """
+ A list-like interface that allows iterating over headers as byte-pairs
+ of (lowercased-name, value).
+
+ Internally we actually store the representation as three-tuples,
+ including both the raw original casing, in order to preserve casing
+    over-the-wire, and the lowercased name, for case-insensitive comparisons.
+
+ r = Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "example.org"), ("Connection", "keep-alive")],
+ http_version="1.1",
+ )
+ assert r.headers == [
+ (b"host", b"example.org"),
+ (b"connection", b"keep-alive")
+ ]
+ assert r.headers.raw_items() == [
+ (b"Host", b"example.org"),
+ (b"Connection", b"keep-alive")
+ ]
+ """
+
+ __slots__ = "_full_items"
+
+ def __init__(self, full_items: List[Tuple[bytes, bytes, bytes]]) -> None:
+ self._full_items = full_items
+
+ def __bool__(self) -> bool:
+ return bool(self._full_items)
+
+ def __eq__(self, other: object) -> bool:
+ return list(self) == list(other) # type: ignore
+
+ def __len__(self) -> int:
+ return len(self._full_items)
+
+ def __repr__(self) -> str:
+        return "<Headers(%s)>" % repr(list(self))
+
+ def __getitem__(self, idx: int) -> Tuple[bytes, bytes]: # type: ignore[override]
+ _, name, value = self._full_items[idx]
+ return (name, value)
+
+ def raw_items(self) -> List[Tuple[bytes, bytes]]:
+ return [(raw_name, value) for raw_name, _, value in self._full_items]
+
+
+HeaderTypes = Union[
+ List[Tuple[bytes, bytes]],
+ List[Tuple[bytes, str]],
+ List[Tuple[str, bytes]],
+ List[Tuple[str, str]],
+]
+
+
+@overload
+def normalize_and_validate(headers: Headers, _parsed: Literal[True]) -> Headers:
+ ...
+
+
+@overload
+def normalize_and_validate(headers: HeaderTypes, _parsed: Literal[False]) -> Headers:
+ ...
+
+
+@overload
+def normalize_and_validate(
+ headers: Union[Headers, HeaderTypes], _parsed: bool = False
+) -> Headers:
+ ...
+
+
+def normalize_and_validate(
+ headers: Union[Headers, HeaderTypes], _parsed: bool = False
+) -> Headers:
+ new_headers = []
+ seen_content_length = None
+ saw_transfer_encoding = False
+ for name, value in headers:
+ # For headers coming out of the parser, we can safely skip some steps,
+ # because it always returns bytes and has already run these regexes
+ # over the data:
+ if not _parsed:
+ name = bytesify(name)
+ value = bytesify(value)
+ validate(_field_name_re, name, "Illegal header name {!r}", name)
+ validate(_field_value_re, value, "Illegal header value {!r}", value)
+ assert isinstance(name, bytes)
+ assert isinstance(value, bytes)
+
+ raw_name = name
+ name = name.lower()
+ if name == b"content-length":
+ lengths = {length.strip() for length in value.split(b",")}
+ if len(lengths) != 1:
+ raise LocalProtocolError("conflicting Content-Length headers")
+ value = lengths.pop()
+ validate(_content_length_re, value, "bad Content-Length")
+ if seen_content_length is None:
+ seen_content_length = value
+ new_headers.append((raw_name, name, value))
+ elif seen_content_length != value:
+ raise LocalProtocolError("conflicting Content-Length headers")
+ elif name == b"transfer-encoding":
+ # "A server that receives a request message with a transfer coding
+ # it does not understand SHOULD respond with 501 (Not
+ # Implemented)."
+ # https://tools.ietf.org/html/rfc7230#section-3.3.1
+ if saw_transfer_encoding:
+ raise LocalProtocolError(
+ "multiple Transfer-Encoding headers", error_status_hint=501
+ )
+ # "All transfer-coding names are case-insensitive"
+ # -- https://tools.ietf.org/html/rfc7230#section-4
+ value = value.lower()
+ if value != b"chunked":
+ raise LocalProtocolError(
+ "Only Transfer-Encoding: chunked is supported",
+ error_status_hint=501,
+ )
+ saw_transfer_encoding = True
+ new_headers.append((raw_name, name, value))
+ else:
+ new_headers.append((raw_name, name, value))
+ return Headers(new_headers)
+
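+# Illustrations of the checks above (sketch, not executed by the library):
+#
+#     >>> normalize_and_validate([("Content-Length", "10"), ("Content-Length", "10")])
+#     <Headers([(b'content-length', b'10')])>
+#     >>> normalize_and_validate([("Content-Length", "10"), ("Content-Length", "11")])
+#     Traceback (most recent call last):
+#         ...
+#     h11._util.LocalProtocolError: conflicting Content-Length headers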
+
+def get_comma_header(headers: Headers, name: bytes) -> List[bytes]:
+ # Should only be used for headers whose value is a list of
+ # comma-separated, case-insensitive values.
+ #
+ # The header name `name` is expected to be lower-case bytes.
+ #
+    # Connection: meets these criteria (including case insensitivity).
+ #
+ # Content-Length: technically is just a single value (1*DIGIT), but the
+ # standard makes reference to implementations that do multiple values, and
+    # using this doesn't hurt. Ditto, case insensitivity doesn't hurt things either
+ # way.
+ #
+ # Transfer-Encoding: is more complex (allows for quoted strings), so
+ # splitting on , is actually wrong. For example, this is legal:
+ #
+ # Transfer-Encoding: foo; options="1,2", chunked
+ #
+ # and should be parsed as
+ #
+ # foo; options="1,2"
+ # chunked
+ #
+ # but this naive function will parse it as
+ #
+ # foo; options="1
+ # 2"
+ # chunked
+ #
+ # However, this is okay because the only thing we are going to do with
+ # any Transfer-Encoding is reject ones that aren't just "chunked", so
+ # both of these will be treated the same anyway.
+ #
+ # Expect: the only legal value is the literal string
+ # "100-continue". Splitting on commas is harmless. Case insensitive.
+ #
+ out: List[bytes] = []
+ for _, found_name, found_raw_value in headers._full_items:
+ if found_name == name:
+ found_raw_value = found_raw_value.lower()
+ for found_split_value in found_raw_value.split(b","):
+ found_split_value = found_split_value.strip()
+ if found_split_value:
+ out.append(found_split_value)
+ return out
+
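+# E.g. (illustrative): given a header list containing
+# ("Connection", "keep-alive, Upgrade"), this returns the values lowercased
+# and split apart:
+#
+#     get_comma_header(headers, b"connection") == [b"keep-alive", b"upgrade"]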
+
+def set_comma_header(headers: Headers, name: bytes, new_values: List[bytes]) -> Headers:
+ # The header name `name` is expected to be lower-case bytes.
+ #
+ # Note that when we store the header we use title casing for the header
+ # names, in order to match the conventional HTTP header style.
+ #
+ # Simply calling `.title()` is a blunt approach, but it's correct
+ # here given the cases where we're using `set_comma_header`...
+ #
+ # Connection, Content-Length, Transfer-Encoding.
+ new_headers: List[Tuple[bytes, bytes]] = []
+ for found_raw_name, found_name, found_raw_value in headers._full_items:
+ if found_name != name:
+ new_headers.append((found_raw_name, found_raw_value))
+ for new_value in new_values:
+ new_headers.append((name.title(), new_value))
+ return normalize_and_validate(new_headers)
+
+
+def has_expect_100_continue(request: "Request") -> bool:
+ # https://tools.ietf.org/html/rfc7231#section-5.1.1
+ # "A server that receives a 100-continue expectation in an HTTP/1.0 request
+ # MUST ignore that expectation."
+ if request.http_version < b"1.1":
+ return False
+ expect = get_comma_header(request.headers, b"expect")
+ return b"100-continue" in expect
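+
+
+# For example (illustrative):
+#
+#   req = Request(method="GET", target="/",
+#                 headers=[("Host", "example.com"), ("Expect", "100-continue")])
+#   has_expect_100_continue(req)   # -> True (http_version defaults to b"1.1")
+#
+#   # The same request with http_version="1.0" would return False.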
diff --git a/.venv/Lib/site-packages/h11/_readers.py b/.venv/Lib/site-packages/h11/_readers.py
new file mode 100644
index 0000000..08a9574
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/_readers.py
@@ -0,0 +1,247 @@
+# Code to read HTTP data
+#
+# Strategy: each reader is a callable which takes a ReceiveBuffer object, and
+# either:
+# 1) consumes some of it and returns an Event
+# 2) raises a LocalProtocolError (for consistency -- e.g. we call validate()
+# and it might raise a LocalProtocolError, so it's simpler to always use
+# this)
+# 3) returns None, meaning "I need more data"
+#
+# If they have a .read_eof attribute, then this will be called if an EOF is
+# received -- but this is optional. Either way, the actual ConnectionClosed
+# event will be generated afterwards.
+#
+# READERS is a dict describing how to pick a reader. It maps states to either:
+# - a reader
+# - or, for body readers, a dict of per-framing reader factories
+
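+# A minimal conforming reader, for illustration only (not one h11 uses):
+#
+#   def read_some(buf):                    # buf is a ReceiveBuffer
+#       data = buf.maybe_extract_at_most(4096)
+#       if data is None:
+#           return None                    # case (3): need more data
+#       return Data(data=data)             # case (1): consumed bytes -> Event
+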
+import re
+from typing import Any, Callable, Dict, Iterable, NoReturn, Optional, Tuple, Type, Union
+
+from ._abnf import chunk_header, header_field, request_line, status_line
+from ._events import Data, EndOfMessage, InformationalResponse, Request, Response
+from ._receivebuffer import ReceiveBuffer
+from ._state import (
+ CLIENT,
+ CLOSED,
+ DONE,
+ IDLE,
+ MUST_CLOSE,
+ SEND_BODY,
+ SEND_RESPONSE,
+ SERVER,
+)
+from ._util import LocalProtocolError, RemoteProtocolError, Sentinel, validate
+
+__all__ = ["READERS"]
+
+header_field_re = re.compile(header_field.encode("ascii"))
+obs_fold_re = re.compile(rb"[ \t]+")
+
+
+def _obsolete_line_fold(lines: Iterable[bytes]) -> Iterable[bytes]:
+ it = iter(lines)
+ last: Optional[bytes] = None
+ for line in it:
+ match = obs_fold_re.match(line)
+ if match:
+ if last is None:
+ raise LocalProtocolError("continuation line at start of headers")
+ if not isinstance(last, bytearray):
+ # Cast to a mutable type, avoiding copy on append to ensure O(n) time
+ last = bytearray(last)
+ last += b" "
+ last += line[match.end() :]
+ else:
+ if last is not None:
+ yield last
+ last = line
+ if last is not None:
+ yield last
+
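+# For example (illustrative): the obs-folded header lines
+#
+#   [b"X-Thing: one", b"\t two", b"Host: example.com"]
+#
+# come out of _obsolete_line_fold (as bytes-like objects) as
+#
+#   [b"X-Thing: one two", b"Host: example.com"]
+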
+
+def _decode_header_lines(
+ lines: Iterable[bytes],
+) -> Iterable[Tuple[bytes, bytes]]:
+ for line in _obsolete_line_fold(lines):
+ matches = validate(header_field_re, line, "illegal header line: {!r}", line)
+ yield (matches["field_name"], matches["field_value"])
+
+
+request_line_re = re.compile(request_line.encode("ascii"))
+
+
+def maybe_read_from_IDLE_client(buf: ReceiveBuffer) -> Optional[Request]:
+ lines = buf.maybe_extract_lines()
+ if lines is None:
+ if buf.is_next_line_obviously_invalid_request_line():
+ raise LocalProtocolError("illegal request line")
+ return None
+ if not lines:
+ raise LocalProtocolError("no request line received")
+ matches = validate(
+ request_line_re, lines[0], "illegal request line: {!r}", lines[0]
+ )
+ return Request(
+ headers=list(_decode_header_lines(lines[1:])), _parsed=True, **matches
+ )
+
+
+status_line_re = re.compile(status_line.encode("ascii"))
+
+
+def maybe_read_from_SEND_RESPONSE_server(
+ buf: ReceiveBuffer,
+) -> Union[InformationalResponse, Response, None]:
+ lines = buf.maybe_extract_lines()
+ if lines is None:
+ if buf.is_next_line_obviously_invalid_request_line():
+ raise LocalProtocolError("illegal request line")
+ return None
+ if not lines:
+ raise LocalProtocolError("no response line received")
+ matches = validate(status_line_re, lines[0], "illegal status line: {!r}", lines[0])
+ http_version = (
+ b"1.1" if matches["http_version"] is None else matches["http_version"]
+ )
+ reason = b"" if matches["reason"] is None else matches["reason"]
+ status_code = int(matches["status_code"])
+ class_: Union[Type[InformationalResponse], Type[Response]] = (
+ InformationalResponse if status_code < 200 else Response
+ )
+ return class_(
+ headers=list(_decode_header_lines(lines[1:])),
+ _parsed=True,
+ status_code=status_code,
+ reason=reason,
+ http_version=http_version,
+ )
+
+
+class ContentLengthReader:
+ def __init__(self, length: int) -> None:
+ self._length = length
+ self._remaining = length
+
+ def __call__(self, buf: ReceiveBuffer) -> Union[Data, EndOfMessage, None]:
+ if self._remaining == 0:
+ return EndOfMessage()
+ data = buf.maybe_extract_at_most(self._remaining)
+ if data is None:
+ return None
+ self._remaining -= len(data)
+ return Data(data=data)
+
+ def read_eof(self) -> NoReturn:
+ raise RemoteProtocolError(
+ "peer closed connection without sending complete message body "
+ "(received {} bytes, expected {})".format(
+ self._length - self._remaining, self._length
+ )
+ )
+
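+# Illustration: framing a 5-byte body with the class above.
+#
+#   reader = ContentLengthReader(5)
+#   buf = ReceiveBuffer()
+#   buf += b"hel"
+#   reader(buf)    # -> Data(data=b"hel")
+#   reader(buf)    # -> None (need more data)
+#   buf += b"lo"
+#   reader(buf)    # -> Data(data=b"lo")
+#   reader(buf)    # -> EndOfMessage()
+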
+
+chunk_header_re = re.compile(chunk_header.encode("ascii"))
+
+
+class ChunkedReader:
+ def __init__(self) -> None:
+ self._bytes_in_chunk = 0
+ # After reading a chunk, we have to throw away the trailing \r\n; if
+ # this is >0 then we discard that many bytes before resuming regular
+ # de-chunkification.
+ self._bytes_to_discard = 0
+ self._reading_trailer = False
+
+ def __call__(self, buf: ReceiveBuffer) -> Union[Data, EndOfMessage, None]:
+ if self._reading_trailer:
+ lines = buf.maybe_extract_lines()
+ if lines is None:
+ return None
+ return EndOfMessage(headers=list(_decode_header_lines(lines)))
+ if self._bytes_to_discard > 0:
+ data = buf.maybe_extract_at_most(self._bytes_to_discard)
+ if data is None:
+ return None
+ self._bytes_to_discard -= len(data)
+ if self._bytes_to_discard > 0:
+ return None
+ # else, fall through and read some more
+ assert self._bytes_to_discard == 0
+ if self._bytes_in_chunk == 0:
+ # We need to refill our chunk count
+ chunk_header = buf.maybe_extract_next_line()
+ if chunk_header is None:
+ return None
+ matches = validate(
+ chunk_header_re,
+ chunk_header,
+ "illegal chunk header: {!r}",
+ chunk_header,
+ )
+ # XX FIXME: we discard chunk extensions. Does anyone care?
+ self._bytes_in_chunk = int(matches["chunk_size"], base=16)
+ if self._bytes_in_chunk == 0:
+ self._reading_trailer = True
+ return self(buf)
+ chunk_start = True
+ else:
+ chunk_start = False
+ assert self._bytes_in_chunk > 0
+ data = buf.maybe_extract_at_most(self._bytes_in_chunk)
+ if data is None:
+ return None
+ self._bytes_in_chunk -= len(data)
+ if self._bytes_in_chunk == 0:
+ self._bytes_to_discard = 2
+ chunk_end = True
+ else:
+ chunk_end = False
+ return Data(data=data, chunk_start=chunk_start, chunk_end=chunk_end)
+
+ def read_eof(self) -> NoReturn:
+ raise RemoteProtocolError(
+ "peer closed connection without sending complete message body "
+ "(incomplete chunked read)"
+ )
+
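+# Illustration: de-chunking one chunk plus the terminating zero-size chunk.
+#
+#   reader = ChunkedReader()
+#   buf = ReceiveBuffer()
+#   buf += b"5\r\nhello\r\n0\r\n\r\n"
+#   reader(buf)    # -> Data(data=b"hello", chunk_start=True, chunk_end=True)
+#   reader(buf)    # -> EndOfMessage() (trailing CRLF and empty trailer consumed)
+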
+
+class Http10Reader:
+ def __call__(self, buf: ReceiveBuffer) -> Optional[Data]:
+ data = buf.maybe_extract_at_most(999999999)
+ if data is None:
+ return None
+ return Data(data=data)
+
+ def read_eof(self) -> EndOfMessage:
+ return EndOfMessage()
+
+
+def expect_nothing(buf: ReceiveBuffer) -> None:
+ if buf:
+ raise LocalProtocolError("Got data when expecting EOF")
+ return None
+
+
+ReadersType = Dict[
+ Union[Type[Sentinel], Tuple[Type[Sentinel], Type[Sentinel]]],
+ Union[Callable[..., Any], Dict[str, Callable[..., Any]]],
+]
+
+READERS: ReadersType = {
+ (CLIENT, IDLE): maybe_read_from_IDLE_client,
+ (SERVER, IDLE): maybe_read_from_SEND_RESPONSE_server,
+ (SERVER, SEND_RESPONSE): maybe_read_from_SEND_RESPONSE_server,
+ (CLIENT, DONE): expect_nothing,
+ (CLIENT, MUST_CLOSE): expect_nothing,
+ (CLIENT, CLOSED): expect_nothing,
+ (SERVER, DONE): expect_nothing,
+ (SERVER, MUST_CLOSE): expect_nothing,
+ (SERVER, CLOSED): expect_nothing,
+ SEND_BODY: {
+ "chunked": ChunkedReader,
+ "content-length": ContentLengthReader,
+ "http/1.0": Http10Reader,
+ },
+}
diff --git a/.venv/Lib/site-packages/h11/_receivebuffer.py b/.venv/Lib/site-packages/h11/_receivebuffer.py
new file mode 100644
index 0000000..e5c4e08
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/_receivebuffer.py
@@ -0,0 +1,153 @@
+import re
+import sys
+from typing import List, Optional, Union
+
+__all__ = ["ReceiveBuffer"]
+
+
+# Operations we want to support:
+# - find next \r\n or \r\n\r\n (\n or \n\n are also acceptable),
+# or wait until there is one
+# - read at-most-N bytes
+# Goals:
+# - on average, do this fast
+# - worst case, do this in O(n) where n is the number of bytes processed
+# Plan:
+# - store bytearray, offset, how far we've searched for a separator token
+# - use the how-far-we've-searched data to avoid rescanning
+# - while doing a stream of uninterrupted processing, advance offset instead
+# of constantly copying
+# WARNING:
+# - I haven't benchmarked or profiled any of this yet.
+#
+# Note that starting in Python 3.4, deleting the initial n bytes from a
+# bytearray is amortized O(n), thanks to some excellent work by Antoine
+# Martin:
+#
+# https://bugs.python.org/issue19087
+#
+# This means that if we only supported 3.4+, we could get rid of the code here
+# involving self._start and self.compress, because it's doing exactly the same
+# thing that bytearray now does internally.
+#
+# BUT unfortunately, we still support 2.7, and reading short segments out of a
+# long buffer MUST be O(bytes read) to avoid DoS issues, so we can't actually
+# delete this code. Yet:
+#
+# https://pythonclock.org/
+#
+# (Two things to double-check first though: make sure PyPy also has the
+# optimization, and benchmark to make sure it's a win, since we do have a
+# slightly clever thing where we delay calling compress() until we've
+# processed a whole event, which could in theory be slightly more efficient
+# than the internal bytearray support.)
+blank_line_regex = re.compile(b"\n\r?\n", re.MULTILINE)
+
+
+class ReceiveBuffer:
+ def __init__(self) -> None:
+ self._data = bytearray()
+ self._next_line_search = 0
+ self._multiple_lines_search = 0
+
+ def __iadd__(self, byteslike: Union[bytes, bytearray]) -> "ReceiveBuffer":
+ self._data += byteslike
+ return self
+
+ def __bool__(self) -> bool:
+ return bool(len(self))
+
+ def __len__(self) -> int:
+ return len(self._data)
+
+ # for @property unprocessed_data
+ def __bytes__(self) -> bytes:
+ return bytes(self._data)
+
+ def _extract(self, count: int) -> bytearray:
+ # Extract an initial slice of the data buffer and return it.
+ out = self._data[:count]
+ del self._data[:count]
+
+ self._next_line_search = 0
+ self._multiple_lines_search = 0
+
+ return out
+
+ def maybe_extract_at_most(self, count: int) -> Optional[bytearray]:
+ """
+ Extract a fixed number of bytes from the buffer.
+ """
+ out = self._data[:count]
+ if not out:
+ return None
+
+ return self._extract(count)
+
+ def maybe_extract_next_line(self) -> Optional[bytearray]:
+ """
+ Extract the first line, if it is completed in the buffer.
+ """
+ # Only search in buffer space that we've not already looked at.
+ search_start_index = max(0, self._next_line_search - 1)
+ partial_idx = self._data.find(b"\r\n", search_start_index)
+
+ if partial_idx == -1:
+ self._next_line_search = len(self._data)
+ return None
+
+ # + 2 compensates for len(b"\r\n")
+ idx = partial_idx + 2
+
+ return self._extract(idx)
+
+ def maybe_extract_lines(self) -> Optional[List[bytearray]]:
+ """
+ Extract everything up to the first blank line, and return a list of lines.
+ """
+ # Handle the case where we have an immediate empty line.
+ if self._data[:1] == b"\n":
+ self._extract(1)
+ return []
+
+ if self._data[:2] == b"\r\n":
+ self._extract(2)
+ return []
+
+ # Only search in buffer space that we've not already looked at.
+ match = blank_line_regex.search(self._data, self._multiple_lines_search)
+ if match is None:
+ self._multiple_lines_search = max(0, len(self._data) - 2)
+ return None
+
+ # Truncate the buffer and return it.
+ idx = match.span(0)[-1]
+ out = self._extract(idx)
+ lines = out.split(b"\n")
+
+ for line in lines:
+ if line.endswith(b"\r"):
+ del line[-1]
+
+ assert lines[-2] == lines[-1] == b""
+
+ del lines[-2:]
+
+ return lines
+
+ # In theory we should wait until `\r\n` before starting to validate
+ # incoming data. However, it's useful to detect (very) invalid data
+ # early, since it might not contain `\r\n` at all (in which case only a
+ # timeout would get rid of it).
+ # This is not a 100% effective detection, but more of a cheap sanity
+ # check allowing for early abort in some useful cases.
+ # It is especially useful when the peer sends us a TLS stream while we
+ # were expecting plain HTTP, given that all versions of TLS so far start
+ # the handshake with a 0x16 message type code.
+ def is_next_line_obviously_invalid_request_line(self) -> bool:
+ try:
+ # HTTP header line must not contain non-printable characters
+ # and should not start with a space
+ return self._data[0] < 0x21
+ except IndexError:
+ return False
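+
+
+# Usage sketch (illustrative):
+#
+#   buf = ReceiveBuffer()
+#   buf += b"HTTP/1.1 200 OK\r\nFoo: bar\r\n\r\nbody"
+#   buf.maybe_extract_lines()      # -> [b"HTTP/1.1 200 OK", b"Foo: bar"]
+#   buf.maybe_extract_at_most(99)  # -> b"body"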
diff --git a/.venv/Lib/site-packages/h11/_state.py b/.venv/Lib/site-packages/h11/_state.py
new file mode 100644
index 0000000..3593430
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/_state.py
@@ -0,0 +1,367 @@
+################################################################
+# The core state machine
+################################################################
+#
+# Rule 1: everything that affects the state machine and state transitions must
+# live here in this file. As much as possible goes into the table-based
+# representation, but for the bits that don't quite fit, the actual code and
+# state must nonetheless live here.
+#
+# Rule 2: this file does not know about what role we're playing; it only knows
+# about HTTP request/response cycles in the abstract. This ensures that we
+# don't cheat and apply different rules to local and remote parties.
+#
+#
+# Theory of operation
+# ===================
+#
+# Possibly the simplest way to think about this is that we actually have 5
+# different state machines here. Yes, 5. These are:
+#
+# 1) The client state, with its complicated automaton (see the docs)
+# 2) The server state, with its complicated automaton (see the docs)
+# 3) The keep-alive state, with possible states {True, False}
+# 4) The SWITCH_CONNECT state, with possible states {False, True}
+# 5) The SWITCH_UPGRADE state, with possible states {False, True}
+#
+# For (3)-(5), the first state listed is the initial state.
+#
+# (1)-(3) are stored explicitly in member variables. The last
+# two are stored implicitly in the pending_switch_proposals set as:
+# (state of 4) == (_SWITCH_CONNECT in pending_switch_proposals)
+# (state of 5) == (_SWITCH_UPGRADE in pending_switch_proposals)
+#
+# And each of these machines has two different kinds of transitions:
+#
+# a) Event-triggered
+# b) State-triggered
+#
+# Event triggered is the obvious thing that you'd think it is: some event
+# happens, and if it's the right event at the right time then a transition
+# happens. But there are somewhat complicated rules for which machines can
+# "see" which events. (As a rule of thumb, if a machine "sees" an event, this
+# means two things: the event can affect the machine, and if the machine is
+# not in a state where it expects that event then it's an error.) These rules
+# are:
+#
+# 1) The client machine sees all h11.events objects emitted by the client.
+#
+# 2) The server machine sees all h11.events objects emitted by the server.
+#
+# It also sees the client's Request event.
+#
+# And sometimes, server events are annotated with a _SWITCH_* event. For
+# example, we can have a (Response, _SWITCH_CONNECT) event, which is
+# different from a regular Response event.
+#
+# 3) The keep-alive machine sees the process_keep_alive_disabled() event
+# (which is derived from Request/Response events), and this event
+# transitions it from True -> False, or from False -> False. There's no way
+# to transition back.
+#
+# 4&5) The _SWITCH_* machines transition from False->True when we get a
+# Request that proposes the relevant type of switch (via
+# process_client_switch_proposals), and they go from True->False when we
+# get a Response that has no _SWITCH_* annotation.
+#
+# So that's event-triggered transitions.
+#
+# State-triggered transitions are less standard. What they do here is couple
+# the machines together. The way this works is, when certain *joint*
+# configurations of states are achieved, then we automatically transition to a
+# new *joint* state. So, for example, if we're ever in a joint state with
+#
+# client: DONE
+# keep-alive: False
+#
+# then the client state immediately transitions to:
+#
+# client: MUST_CLOSE
+#
+# This is fundamentally different from an event-based transition, because it
+# doesn't matter how we arrived at the {client: DONE, keep-alive: False} state
+# -- maybe the client transitioned SEND_BODY -> DONE, or keep-alive
+# transitioned True -> False. Either way, once this precondition is satisfied,
+# this transition is immediately triggered.
+#
+# What if two conflicting state-based transitions get enabled at the same
+# time? In practice there's only one case where this arises (client DONE ->
+# MIGHT_SWITCH_PROTOCOL versus DONE -> MUST_CLOSE), and we resolve it by
+# explicitly prioritizing the DONE -> MIGHT_SWITCH_PROTOCOL transition.
+#
+# Implementation
+# --------------
+#
+# The event-triggered transitions for the server and client machines are all
+# stored explicitly in a table. Ditto for the state-triggered transitions that
+# involve just the server and client state.
+#
+# The transitions for the other machines, and the state-triggered transitions
+# that involve the other machines, are written out as explicit Python code.
+#
+# It'd be nice if there were some cleaner way to do all this. This isn't
+# *too* terrible, but I feel like it could probably be better.
+#
+# WARNING
+# -------
+#
+# The script that generates the state machine diagrams for the docs knows how
+# to read out the EVENT_TRIGGERED_TRANSITIONS and STATE_TRIGGERED_TRANSITIONS
+# tables. But it can't automatically read the transitions that are written
+# directly in Python code. So if you touch those, you need to also update the
+# script to keep it in sync!
+from typing import cast, Dict, Optional, Set, Tuple, Type, Union
+
+from ._events import *
+from ._util import LocalProtocolError, Sentinel
+
+# Everything in __all__ gets re-exported as part of the h11 public API.
+__all__ = [
+ "CLIENT",
+ "SERVER",
+ "IDLE",
+ "SEND_RESPONSE",
+ "SEND_BODY",
+ "DONE",
+ "MUST_CLOSE",
+ "CLOSED",
+ "MIGHT_SWITCH_PROTOCOL",
+ "SWITCHED_PROTOCOL",
+ "ERROR",
+]
+
+
+class CLIENT(Sentinel, metaclass=Sentinel):
+ pass
+
+
+class SERVER(Sentinel, metaclass=Sentinel):
+ pass
+
+
+# States
+class IDLE(Sentinel, metaclass=Sentinel):
+ pass
+
+
+class SEND_RESPONSE(Sentinel, metaclass=Sentinel):
+ pass
+
+
+class SEND_BODY(Sentinel, metaclass=Sentinel):
+ pass
+
+
+class DONE(Sentinel, metaclass=Sentinel):
+ pass
+
+
+class MUST_CLOSE(Sentinel, metaclass=Sentinel):
+ pass
+
+
+class CLOSED(Sentinel, metaclass=Sentinel):
+ pass
+
+
+class ERROR(Sentinel, metaclass=Sentinel):
+ pass
+
+
+# Switch types
+class MIGHT_SWITCH_PROTOCOL(Sentinel, metaclass=Sentinel):
+ pass
+
+
+class SWITCHED_PROTOCOL(Sentinel, metaclass=Sentinel):
+ pass
+
+
+class _SWITCH_UPGRADE(Sentinel, metaclass=Sentinel):
+ pass
+
+
+class _SWITCH_CONNECT(Sentinel, metaclass=Sentinel):
+ pass
+
+
+EventTransitionType = Dict[
+ Type[Sentinel],
+ Dict[
+ Type[Sentinel],
+ Dict[Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]], Type[Sentinel]],
+ ],
+]
+
+EVENT_TRIGGERED_TRANSITIONS: EventTransitionType = {
+ CLIENT: {
+ IDLE: {Request: SEND_BODY, ConnectionClosed: CLOSED},
+ SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE},
+ DONE: {ConnectionClosed: CLOSED},
+ MUST_CLOSE: {ConnectionClosed: CLOSED},
+ CLOSED: {ConnectionClosed: CLOSED},
+ MIGHT_SWITCH_PROTOCOL: {},
+ SWITCHED_PROTOCOL: {},
+ ERROR: {},
+ },
+ SERVER: {
+ IDLE: {
+ ConnectionClosed: CLOSED,
+ Response: SEND_BODY,
+ # Special case: server sees client Request events, in this form
+ (Request, CLIENT): SEND_RESPONSE,
+ },
+ SEND_RESPONSE: {
+ InformationalResponse: SEND_RESPONSE,
+ Response: SEND_BODY,
+ (InformationalResponse, _SWITCH_UPGRADE): SWITCHED_PROTOCOL,
+ (Response, _SWITCH_CONNECT): SWITCHED_PROTOCOL,
+ },
+ SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE},
+ DONE: {ConnectionClosed: CLOSED},
+ MUST_CLOSE: {ConnectionClosed: CLOSED},
+ CLOSED: {ConnectionClosed: CLOSED},
+ SWITCHED_PROTOCOL: {},
+ ERROR: {},
+ },
+}
+
+StateTransitionType = Dict[
+ Tuple[Type[Sentinel], Type[Sentinel]], Dict[Type[Sentinel], Type[Sentinel]]
+]
+
+# NB: there are also some special-case state-triggered transitions hard-coded
+# into _fire_state_triggered_transitions below.
+STATE_TRIGGERED_TRANSITIONS: StateTransitionType = {
+ # (Client state, Server state) -> new states
+ # Protocol negotiation
+ (MIGHT_SWITCH_PROTOCOL, SWITCHED_PROTOCOL): {CLIENT: SWITCHED_PROTOCOL},
+ # Socket shutdown
+ (CLOSED, DONE): {SERVER: MUST_CLOSE},
+ (CLOSED, IDLE): {SERVER: MUST_CLOSE},
+ (ERROR, DONE): {SERVER: MUST_CLOSE},
+ (DONE, CLOSED): {CLIENT: MUST_CLOSE},
+ (IDLE, CLOSED): {CLIENT: MUST_CLOSE},
+ (DONE, ERROR): {CLIENT: MUST_CLOSE},
+}
+
+
+class ConnectionState:
+ def __init__(self) -> None:
+ # Extra bits of state that don't quite fit into the state model.
+
+ # If this is False then it enables the automatic DONE -> MUST_CLOSE
+ # transition. Don't set this directly; call .process_keep_alive_disabled()
+ self.keep_alive = True
+
+ # This is a subset of {_SWITCH_UPGRADE, _SWITCH_CONNECT}, containing the
+ # proposals made by the client for switching protocols.
+ self.pending_switch_proposals: Set[Type[Sentinel]] = set()
+
+ self.states: Dict[Type[Sentinel], Type[Sentinel]] = {CLIENT: IDLE, SERVER: IDLE}
+
+ def process_error(self, role: Type[Sentinel]) -> None:
+ self.states[role] = ERROR
+ self._fire_state_triggered_transitions()
+
+ def process_keep_alive_disabled(self) -> None:
+ self.keep_alive = False
+ self._fire_state_triggered_transitions()
+
+ def process_client_switch_proposal(self, switch_event: Type[Sentinel]) -> None:
+ self.pending_switch_proposals.add(switch_event)
+ self._fire_state_triggered_transitions()
+
+ def process_event(
+ self,
+ role: Type[Sentinel],
+ event_type: Type[Event],
+ server_switch_event: Optional[Type[Sentinel]] = None,
+ ) -> None:
+ _event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]] = event_type
+ if server_switch_event is not None:
+ assert role is SERVER
+ if server_switch_event not in self.pending_switch_proposals:
+ raise LocalProtocolError(
+ "Received server {} event without a pending proposal".format(
+ server_switch_event
+ )
+ )
+ _event_type = (event_type, server_switch_event)
+ if server_switch_event is None and _event_type is Response:
+ self.pending_switch_proposals = set()
+ self._fire_event_triggered_transitions(role, _event_type)
+ # Special case: the server state does get to see Request
+ # events.
+ if _event_type is Request:
+ assert role is CLIENT
+ self._fire_event_triggered_transitions(SERVER, (Request, CLIENT))
+ self._fire_state_triggered_transitions()
+
+ def _fire_event_triggered_transitions(
+ self,
+ role: Type[Sentinel],
+ event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]],
+ ) -> None:
+ state = self.states[role]
+ try:
+ new_state = EVENT_TRIGGERED_TRANSITIONS[role][state][event_type]
+ except KeyError:
+ event_type = cast(Type[Event], event_type)
+ raise LocalProtocolError(
+ "can't handle event type {} when role={} and state={}".format(
+ event_type.__name__, role, self.states[role]
+ )
+ ) from None
+ self.states[role] = new_state
+
+ def _fire_state_triggered_transitions(self) -> None:
+ # We apply these rules repeatedly until converging on a fixed point
+ while True:
+ start_states = dict(self.states)
+
+ # It could happen that both these special-case transitions are
+ # enabled at the same time:
+ #
+ # DONE -> MIGHT_SWITCH_PROTOCOL
+ # DONE -> MUST_CLOSE
+ #
+ # For example, this will always be true of an HTTP/1.0 client
+ # requesting CONNECT. If this happens, the protocol switch takes
+ # priority. From there the client will either go to
+ # SWITCHED_PROTOCOL, in which case it's none of our business when
+ # they close the connection, or else the server will deny the
+ # request, in which case the client will go back to DONE and then
+ # from there to MUST_CLOSE.
+ if self.pending_switch_proposals:
+ if self.states[CLIENT] is DONE:
+ self.states[CLIENT] = MIGHT_SWITCH_PROTOCOL
+
+ if not self.pending_switch_proposals:
+ if self.states[CLIENT] is MIGHT_SWITCH_PROTOCOL:
+ self.states[CLIENT] = DONE
+
+ if not self.keep_alive:
+ for role in (CLIENT, SERVER):
+ if self.states[role] is DONE:
+ self.states[role] = MUST_CLOSE
+
+ # Tabular state-triggered transitions
+ joint_state = (self.states[CLIENT], self.states[SERVER])
+ changes = STATE_TRIGGERED_TRANSITIONS.get(joint_state, {})
+ self.states.update(changes)
+
+ if self.states == start_states:
+ # Fixed point reached
+ return
+
+ def start_next_cycle(self) -> None:
+ if self.states != {CLIENT: DONE, SERVER: DONE}:
+ raise LocalProtocolError(
+ "not in a reusable state. self.states={}".format(self.states)
+ )
+ # Can't reach DONE/DONE with any of these active, but still, let's be
+ # sure.
+ assert self.keep_alive
+ assert not self.pending_switch_proposals
+ self.states = {CLIENT: IDLE, SERVER: IDLE}
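+
+
+# Usage sketch (illustrative): hand-driving one request/response cycle.
+#
+#   cs = ConnectionState()
+#   cs.process_event(CLIENT, Request)       # also fires (Request, CLIENT) on SERVER
+#   cs.process_event(CLIENT, EndOfMessage)
+#   cs.states                               # {CLIENT: DONE, SERVER: SEND_RESPONSE}
+#   cs.process_event(SERVER, Response)
+#   cs.process_event(SERVER, EndOfMessage)
+#   cs.start_next_cycle()                   # back to {CLIENT: IDLE, SERVER: IDLE}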
diff --git a/.venv/Lib/site-packages/h11/_util.py b/.venv/Lib/site-packages/h11/_util.py
new file mode 100644
index 0000000..6718445
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/_util.py
@@ -0,0 +1,135 @@
+from typing import Any, Dict, NoReturn, Pattern, Tuple, Type, TypeVar, Union
+
+__all__ = [
+ "ProtocolError",
+ "LocalProtocolError",
+ "RemoteProtocolError",
+ "validate",
+ "bytesify",
+]
+
+
+class ProtocolError(Exception):
+ """Exception indicating a violation of the HTTP/1.1 protocol.
+
+ This is an abstract base class, with two concrete subclasses:
+ :exc:`LocalProtocolError`, which indicates that you tried to do something
+ that HTTP/1.1 says is illegal, and :exc:`RemoteProtocolError`, which
+ indicates that the remote peer tried to do something that HTTP/1.1 says is
+ illegal. See :ref:`error-handling` for details.
+
+ In addition to the normal :exc:`Exception` features, it has one attribute:
+
+ .. attribute:: error_status_hint
+
+ This gives a suggestion as to what status code a server might use if
+ this error occurred as part of a request.
+
+ For a :exc:`RemoteProtocolError`, this is useful as a suggestion for
+ how you might want to respond to a misbehaving peer, if you're
+ implementing a server.
+
+ For a :exc:`LocalProtocolError`, this can be taken as a suggestion for
+ how your peer might have responded to *you* if h11 had allowed you to
+ continue.
+
+ The default is 400 Bad Request, a generic catch-all for protocol
+ violations.
+
+ """
+
+ def __init__(self, msg: str, error_status_hint: int = 400) -> None:
+ if type(self) is ProtocolError:
+ raise TypeError("tried to directly instantiate ProtocolError")
+ Exception.__init__(self, msg)
+ self.error_status_hint = error_status_hint
+
+
+# Strategy: there are a number of public APIs where a LocalProtocolError can
+# be raised (send(), all the different event constructors, ...), and only one
+# public API where RemoteProtocolError can be raised
+# (receive_data()). Therefore we always raise LocalProtocolError internally,
+# and then receive_data will translate this into a RemoteProtocolError.
+#
+# Internally:
+# LocalProtocolError is the generic "ProtocolError".
+# Externally:
+# LocalProtocolError is for local errors and RemoteProtocolError is for
+# remote errors.
+class LocalProtocolError(ProtocolError):
+ def _reraise_as_remote_protocol_error(self) -> NoReturn:
+ # After catching a LocalProtocolError, use this method to re-raise it
+ # as a RemoteProtocolError. This method must be called from inside an
+ # except: block.
+ #
+ # An easy way to get an equivalent RemoteProtocolError is just to
+ # modify 'self' in place.
+ self.__class__ = RemoteProtocolError # type: ignore
+ # But the re-raising is somewhat non-trivial -- you might think that
+ # now that we've modified the in-flight exception object, that just
+ # doing 'raise' to re-raise it would be enough. But it turns out that
+ # this doesn't work, because Python tracks the exception type
+ # (exc_info[0]) separately from the exception object (exc_info[1]),
+ # and we only modified the latter. So we really do need to re-raise
+ # the new type explicitly.
+ # On py3, the traceback is part of the exception object, so our
+ # in-place modification preserved it and we can just re-raise:
+ raise self
+
+
+class RemoteProtocolError(ProtocolError):
+ pass
+
+
+def validate(
+ regex: Pattern[bytes], data: bytes, msg: str = "malformed data", *format_args: Any
+) -> Dict[str, bytes]:
+ match = regex.fullmatch(data)
+ if not match:
+ if format_args:
+ msg = msg.format(*format_args)
+ raise LocalProtocolError(msg)
+ return match.groupdict()
+
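+# For example (illustrative; assumes `import re`), with named groups:
+#
+#   ver_re = re.compile(rb"(?P<major>[0-9])\.(?P<minor>[0-9])")
+#   validate(ver_re, b"1.1")    # -> {"major": b"1", "minor": b"1"}
+#   validate(ver_re, b"bogus")  # -> raises LocalProtocolError("malformed data")
+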
+
+# Sentinel values
+#
+# - Inherit identity-based comparison and hashing from object
+# - Have a nice repr
+# - Have a *bonus property*: type(sentinel) is sentinel
+#
+# The bonus property is useful if you want to take the return value from
+# next_event() and do some sort of dispatch based on type(event).
+
+_T_Sentinel = TypeVar("_T_Sentinel", bound="Sentinel")
+
+
+class Sentinel(type):
+ def __new__(
+ cls: Type[_T_Sentinel],
+ name: str,
+ bases: Tuple[type, ...],
+ namespace: Dict[str, Any],
+ **kwds: Any
+ ) -> _T_Sentinel:
+ assert bases == (Sentinel,)
+ v = super().__new__(cls, name, bases, namespace, **kwds)
+ v.__class__ = v # type: ignore
+ return v
+
+ def __repr__(self) -> str:
+ return self.__name__
+
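+# Example of the bonus property (illustrative; EXAMPLE is a made-up name):
+#
+#   class EXAMPLE(Sentinel, metaclass=Sentinel):
+#       pass
+#
+#   type(EXAMPLE) is EXAMPLE   # True, so dispatch on type(event) just works
+#   repr(EXAMPLE)              # "EXAMPLE"
+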
+
+# Used for methods, request targets, HTTP versions, header names, and header
+# values. Accepts ascii-strings, or bytes/bytearray/memoryview/..., and always
+# returns bytes.
+def bytesify(s: Union[bytes, bytearray, memoryview, int, str]) -> bytes:
+ # Fast-path:
+ if type(s) is bytes:
+ return s
+ if isinstance(s, str):
+ s = s.encode("ascii")
+ if isinstance(s, int):
+ raise TypeError("expected bytes-like object, not int")
+ return bytes(s)
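+
+
+# For example (illustrative):
+#
+#   bytesify("GET")             # -> b"GET"
+#   bytesify(bytearray(b"/"))   # -> b"/"
+#   bytesify(3)                 # -> raises TypeError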
diff --git a/.venv/Lib/site-packages/h11/_version.py b/.venv/Lib/site-packages/h11/_version.py
new file mode 100644
index 0000000..4c89113
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/_version.py
@@ -0,0 +1,16 @@
+# This file must be kept very simple, because it is consumed from several
+# places -- it is imported by h11/__init__.py, execfile'd by setup.py, etc.
+
+# We use a simple scheme:
+# 1.0.0 -> 1.0.0+dev -> 1.1.0 -> 1.1.0+dev
+# where the +dev versions are never released into the wild, they're just what
+# we stick into the VCS in between releases.
+#
+# This is compatible with PEP 440:
+# http://legacy.python.org/dev/peps/pep-0440/
+# via the use of the "local suffix" "+dev", which is disallowed on index
+# servers and causes 1.0.0+dev to sort after plain 1.0.0, which is what we
+# want. (Contrast with the special suffix 1.0.0.dev, which sorts *before*
+# 1.0.0.)
+
+__version__ = "0.14.0"
diff --git a/.venv/Lib/site-packages/h11/_writers.py b/.venv/Lib/site-packages/h11/_writers.py
new file mode 100644
index 0000000..939cdb9
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/_writers.py
@@ -0,0 +1,145 @@
+# Code to write HTTP data
+#
+# Strategy: each writer is a callable which takes an event + a
+# write-some-bytes function, which it calls.
+#
+# WRITERS is a dict describing how to pick a writer. It maps states to either:
+# - a writer
+# - or, for body writers, a dict of framing-dependent writer factories
+
+from typing import Any, Callable, Dict, List, Tuple, Type, Union
+
+from ._events import Data, EndOfMessage, Event, InformationalResponse, Request, Response
+from ._headers import Headers
+from ._state import CLIENT, IDLE, SEND_BODY, SEND_RESPONSE, SERVER
+from ._util import LocalProtocolError, Sentinel
+
+__all__ = ["WRITERS"]
+
+Writer = Callable[[bytes], Any]
+
+
+def write_headers(headers: Headers, write: Writer) -> None:
+ # "Since the Host field-value is critical information for handling a
+ # request, a user agent SHOULD generate Host as the first header field
+ # following the request-line." - RFC 7230
+ raw_items = headers._full_items
+ for raw_name, name, value in raw_items:
+ if name == b"host":
+ write(b"%s: %s\r\n" % (raw_name, value))
+ for raw_name, name, value in raw_items:
+ if name != b"host":
+ write(b"%s: %s\r\n" % (raw_name, value))
+ write(b"\r\n")
+
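+# Illustration (assuming normalize_and_validate from ._headers): Host is
+# emitted first regardless of input order.
+#
+#   out = []
+#   headers = normalize_and_validate([("Accept", "*/*"), ("Host", "example.com")])
+#   write_headers(headers, out.append)
+#   b"".join(out)  # -> b"Host: example.com\r\nAccept: */*\r\n\r\n"
+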
+
+def write_request(request: Request, write: Writer) -> None:
+ if request.http_version != b"1.1":
+ raise LocalProtocolError("I only send HTTP/1.1")
+ write(b"%s %s HTTP/1.1\r\n" % (request.method, request.target))
+ write_headers(request.headers, write)
+
+
+# Shared between InformationalResponse and Response
+def write_any_response(
+ response: Union[InformationalResponse, Response], write: Writer
+) -> None:
+ if response.http_version != b"1.1":
+ raise LocalProtocolError("I only send HTTP/1.1")
+ status_bytes = str(response.status_code).encode("ascii")
+ # We don't bother sending ascii status messages like "OK"; they're
+ # optional and ignored by the protocol. (But the space after the numeric
+ # status code is mandatory.)
+ #
+ # XX FIXME: could at least make an effort to pull out the status message
+ # from stdlib's http.HTTPStatus table. Or maybe just steal their enums
+ # (either by import or copy/paste). We already accept them as status codes
+ # since they're of type IntEnum < int.
+ write(b"HTTP/1.1 %s %s\r\n" % (status_bytes, response.reason))
+ write_headers(response.headers, write)
+
+
+class BodyWriter:
+ def __call__(self, event: Event, write: Writer) -> None:
+ if type(event) is Data:
+ self.send_data(event.data, write)
+ elif type(event) is EndOfMessage:
+ self.send_eom(event.headers, write)
+ else: # pragma: no cover
+ assert False
+
+ def send_data(self, data: bytes, write: Writer) -> None:
+ pass
+
+ def send_eom(self, headers: Headers, write: Writer) -> None:
+ pass
+
+
+#
+# These are all careful not to do anything to 'data' except call len(data) and
+# write(data). This allows us to transparently pass through funny objects,
+# like placeholder objects referring to files on disk that will be sent via
+# sendfile(2).
+#
+class ContentLengthWriter(BodyWriter):
+ def __init__(self, length: int) -> None:
+ self._length = length
+
+ def send_data(self, data: bytes, write: Writer) -> None:
+ self._length -= len(data)
+ if self._length < 0:
+ raise LocalProtocolError("Too much data for declared Content-Length")
+ write(data)
+
+ def send_eom(self, headers: Headers, write: Writer) -> None:
+ if self._length != 0:
+ raise LocalProtocolError("Too little data for declared Content-Length")
+ if headers:
+ raise LocalProtocolError("Content-Length and trailers don't mix")
+
+
+class ChunkedWriter(BodyWriter):
+ def send_data(self, data: bytes, write: Writer) -> None:
+ # if we encoded 0-length data in the naive way, it would look like an
+ # end-of-message.
+ if not data:
+ return
+ write(b"%x\r\n" % len(data))
+ write(data)
+ write(b"\r\n")
+
+ def send_eom(self, headers: Headers, write: Writer) -> None:
+ write(b"0\r\n")
+ write_headers(headers, write)
+
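+# Illustration: the bytes ChunkedWriter puts on the wire (assuming the
+# internal Headers constructor accepts an empty list):
+#
+#   out = []
+#   w = ChunkedWriter()
+#   w.send_data(b"hello", out.append)    # writes b"5\r\nhello\r\n"
+#   w.send_eom(Headers([]), out.append)  # writes b"0\r\n\r\n" (no trailers)
+#   b"".join(out)                        # -> b"5\r\nhello\r\n0\r\n\r\n"
+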
+
+class Http10Writer(BodyWriter):
+ def send_data(self, data: bytes, write: Writer) -> None:
+ write(data)
+
+ def send_eom(self, headers: Headers, write: Writer) -> None:
+ if headers:
+ raise LocalProtocolError("can't send trailers to HTTP/1.0 client")
+ # no need to close the socket ourselves; that will be taken care of by
+ # the Connection: close machinery.
+
+
+WritersType = Dict[
+ Union[Tuple[Type[Sentinel], Type[Sentinel]], Type[Sentinel]],
+ Union[
+ Dict[str, Type[BodyWriter]],
+ Callable[[Union[InformationalResponse, Response], Writer], None],
+ Callable[[Request, Writer], None],
+ ],
+]
+
+WRITERS: WritersType = {
+ (CLIENT, IDLE): write_request,
+ (SERVER, IDLE): write_any_response,
+ (SERVER, SEND_RESPONSE): write_any_response,
+ SEND_BODY: {
+ "chunked": ChunkedWriter,
+ "content-length": ContentLengthWriter,
+ "http/1.0": Http10Writer,
+ },
+}
diff --git a/.venv/Lib/site-packages/h11/py.typed b/.venv/Lib/site-packages/h11/py.typed
new file mode 100644
index 0000000..f5642f7
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/py.typed
@@ -0,0 +1 @@
+Marker
diff --git a/.venv/Lib/site-packages/h11/tests/__init__.py b/.venv/Lib/site-packages/h11/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/.venv/Lib/site-packages/h11/tests/__pycache__/__init__.cpython-312.pyc b/.venv/Lib/site-packages/h11/tests/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..0e5a4a7
Binary files /dev/null and b/.venv/Lib/site-packages/h11/tests/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/tests/__pycache__/helpers.cpython-312.pyc b/.venv/Lib/site-packages/h11/tests/__pycache__/helpers.cpython-312.pyc
new file mode 100644
index 0000000..5618d7f
Binary files /dev/null and b/.venv/Lib/site-packages/h11/tests/__pycache__/helpers.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/tests/__pycache__/test_against_stdlib_http.cpython-312.pyc b/.venv/Lib/site-packages/h11/tests/__pycache__/test_against_stdlib_http.cpython-312.pyc
new file mode 100644
index 0000000..924f83f
Binary files /dev/null and b/.venv/Lib/site-packages/h11/tests/__pycache__/test_against_stdlib_http.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/tests/__pycache__/test_connection.cpython-312.pyc b/.venv/Lib/site-packages/h11/tests/__pycache__/test_connection.cpython-312.pyc
new file mode 100644
index 0000000..3c3a552
Binary files /dev/null and b/.venv/Lib/site-packages/h11/tests/__pycache__/test_connection.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/tests/__pycache__/test_events.cpython-312.pyc b/.venv/Lib/site-packages/h11/tests/__pycache__/test_events.cpython-312.pyc
new file mode 100644
index 0000000..8546740
Binary files /dev/null and b/.venv/Lib/site-packages/h11/tests/__pycache__/test_events.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/tests/__pycache__/test_headers.cpython-312.pyc b/.venv/Lib/site-packages/h11/tests/__pycache__/test_headers.cpython-312.pyc
new file mode 100644
index 0000000..6ca79f4
Binary files /dev/null and b/.venv/Lib/site-packages/h11/tests/__pycache__/test_headers.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/tests/__pycache__/test_helpers.cpython-312.pyc b/.venv/Lib/site-packages/h11/tests/__pycache__/test_helpers.cpython-312.pyc
new file mode 100644
index 0000000..67a01a1
Binary files /dev/null and b/.venv/Lib/site-packages/h11/tests/__pycache__/test_helpers.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/tests/__pycache__/test_io.cpython-312.pyc b/.venv/Lib/site-packages/h11/tests/__pycache__/test_io.cpython-312.pyc
new file mode 100644
index 0000000..0b0fac1
Binary files /dev/null and b/.venv/Lib/site-packages/h11/tests/__pycache__/test_io.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/tests/__pycache__/test_receivebuffer.cpython-312.pyc b/.venv/Lib/site-packages/h11/tests/__pycache__/test_receivebuffer.cpython-312.pyc
new file mode 100644
index 0000000..75f76cd
Binary files /dev/null and b/.venv/Lib/site-packages/h11/tests/__pycache__/test_receivebuffer.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/tests/__pycache__/test_state.cpython-312.pyc b/.venv/Lib/site-packages/h11/tests/__pycache__/test_state.cpython-312.pyc
new file mode 100644
index 0000000..862677d
Binary files /dev/null and b/.venv/Lib/site-packages/h11/tests/__pycache__/test_state.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/tests/__pycache__/test_util.cpython-312.pyc b/.venv/Lib/site-packages/h11/tests/__pycache__/test_util.cpython-312.pyc
new file mode 100644
index 0000000..811ac6f
Binary files /dev/null and b/.venv/Lib/site-packages/h11/tests/__pycache__/test_util.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/h11/tests/data/test-file b/.venv/Lib/site-packages/h11/tests/data/test-file
new file mode 100644
index 0000000..d0be0a6
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/tests/data/test-file
@@ -0,0 +1 @@
+92b12bc045050b55b848d37167a1a63947c364579889ce1d39788e45e9fac9e5
diff --git a/.venv/Lib/site-packages/h11/tests/helpers.py b/.venv/Lib/site-packages/h11/tests/helpers.py
new file mode 100644
index 0000000..571be44
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/tests/helpers.py
@@ -0,0 +1,101 @@
+from typing import cast, List, Type, Union, ValuesView
+
+from .._connection import Connection, NEED_DATA, PAUSED
+from .._events import (
+ ConnectionClosed,
+ Data,
+ EndOfMessage,
+ Event,
+ InformationalResponse,
+ Request,
+ Response,
+)
+from .._state import CLIENT, CLOSED, DONE, MUST_CLOSE, SERVER
+from .._util import Sentinel
+
+try:
+ from typing import Literal
+except ImportError:
+ from typing_extensions import Literal # type: ignore
+
+
+def get_all_events(conn: Connection) -> List[Event]:
+ got_events = []
+ while True:
+ event = conn.next_event()
+ if event in (NEED_DATA, PAUSED):
+ break
+ event = cast(Event, event)
+ got_events.append(event)
+ if type(event) is ConnectionClosed:
+ break
+ return got_events
+
+
+def receive_and_get(conn: Connection, data: bytes) -> List[Event]:
+ conn.receive_data(data)
+ return get_all_events(conn)
+
+
+# Merges adjacent Data events, converts payloads to bytestrings, and removes
+# chunk boundaries.
+def normalize_data_events(in_events: List[Event]) -> List[Event]:
+ out_events: List[Event] = []
+ for event in in_events:
+ if type(event) is Data:
+ event = Data(data=bytes(event.data), chunk_start=False, chunk_end=False)
+ if out_events and type(out_events[-1]) is type(event) is Data:
+ out_events[-1] = Data(
+ data=out_events[-1].data + event.data,
+ chunk_start=out_events[-1].chunk_start,
+ chunk_end=out_events[-1].chunk_end,
+ )
+ else:
+ out_events.append(event)
+ return out_events
+
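+# For example (illustrative):
+#
+#   normalize_data_events([Data(data=b"12", chunk_start=True),
+#                          Data(data=b"345", chunk_end=True)])
+#   # -> [Data(data=b"12345", chunk_start=False, chunk_end=False)]
+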
+
+# Given that we want to write tests that push some events through a Connection
+# and check that its state updates appropriately... we might as well make a habit
+# of pushing them through two Connections with a fake network link in
+# between.
+class ConnectionPair:
+ def __init__(self) -> None:
+ self.conn = {CLIENT: Connection(CLIENT), SERVER: Connection(SERVER)}
+ self.other = {CLIENT: SERVER, SERVER: CLIENT}
+
+ @property
+ def conns(self) -> ValuesView[Connection]:
+ return self.conn.values()
+
+ # expect="match" means "expect the same events we sent"; pass expect=[...]
+ # to say what is expected instead.
+ def send(
+ self,
+ role: Type[Sentinel],
+ send_events: Union[List[Event], Event],
+ expect: Union[List[Event], Event, Literal["match"]] = "match",
+ ) -> bytes:
+ if not isinstance(send_events, list):
+ send_events = [send_events]
+ data = b""
+ closed = False
+ for send_event in send_events:
+ new_data = self.conn[role].send(send_event)
+ if new_data is None:
+ closed = True
+ else:
+ data += new_data
+ # send uses b"" to mean b"", and None to mean closed
+ # receive uses b"" to mean closed, and None to mean "try again"
+ # so we have to translate between the two conventions
+ if data:
+ self.conn[self.other[role]].receive_data(data)
+ if closed:
+ self.conn[self.other[role]].receive_data(b"")
+ got_events = get_all_events(self.conn[self.other[role]])
+ if expect == "match":
+ expect = send_events
+ if not isinstance(expect, list):
+ expect = [expect]
+ assert got_events == expect
+ return data
diff --git a/.venv/Lib/site-packages/h11/tests/test_against_stdlib_http.py b/.venv/Lib/site-packages/h11/tests/test_against_stdlib_http.py
new file mode 100644
index 0000000..d2ee131
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/tests/test_against_stdlib_http.py
@@ -0,0 +1,115 @@
+import json
+import os.path
+import socket
+import socketserver
+import threading
+from contextlib import closing, contextmanager
+from http.server import SimpleHTTPRequestHandler
+from typing import Callable, Generator
+from urllib.request import urlopen
+
+import h11
+
+
+@contextmanager
+def socket_server(
+ handler: Callable[..., socketserver.BaseRequestHandler]
+) -> Generator[socketserver.TCPServer, None, None]:
+ httpd = socketserver.TCPServer(("127.0.0.1", 0), handler)
+ thread = threading.Thread(
+ target=httpd.serve_forever, kwargs={"poll_interval": 0.01}
+ )
+ thread.daemon = True
+ try:
+ thread.start()
+ yield httpd
+ finally:
+ httpd.shutdown()
+
+
+test_file_path = os.path.join(os.path.dirname(__file__), "data/test-file")
+with open(test_file_path, "rb") as f:
+ test_file_data = f.read()
+
+
+class SingleMindedRequestHandler(SimpleHTTPRequestHandler):
+ def translate_path(self, path: str) -> str:
+ return test_file_path
+
+
+def test_h11_as_client() -> None:
+ with socket_server(SingleMindedRequestHandler) as httpd:
+ with closing(socket.create_connection(httpd.server_address)) as s:
+ c = h11.Connection(h11.CLIENT)
+
+ s.sendall(
+ c.send( # type: ignore[arg-type]
+ h11.Request(
+ method="GET", target="/foo", headers=[("Host", "localhost")]
+ )
+ )
+ )
+ s.sendall(c.send(h11.EndOfMessage())) # type: ignore[arg-type]
+
+ data = bytearray()
+ while True:
+ event = c.next_event()
+ print(event)
+ if event is h11.NEED_DATA:
+ # Use a small read buffer to make things more challenging
+ # and exercise more paths :-)
+ c.receive_data(s.recv(10))
+ continue
+ if type(event) is h11.Response:
+ assert event.status_code == 200
+ if type(event) is h11.Data:
+ data += event.data
+ if type(event) is h11.EndOfMessage:
+ break
+ assert bytes(data) == test_file_data
+
+
+class H11RequestHandler(socketserver.BaseRequestHandler):
+ def handle(self) -> None:
+ with closing(self.request) as s:
+ c = h11.Connection(h11.SERVER)
+ request = None
+ while True:
+ event = c.next_event()
+ if event is h11.NEED_DATA:
+ # Use a small read buffer to make things more challenging
+ # and exercise more paths :-)
+ c.receive_data(s.recv(10))
+ continue
+ if type(event) is h11.Request:
+ request = event
+ if type(event) is h11.EndOfMessage:
+ break
+ assert request is not None
+ info = json.dumps(
+ {
+ "method": request.method.decode("ascii"),
+ "target": request.target.decode("ascii"),
+ "headers": {
+ name.decode("ascii"): value.decode("ascii")
+ for (name, value) in request.headers
+ },
+ }
+ )
+ s.sendall(c.send(h11.Response(status_code=200, headers=[]))) # type: ignore[arg-type]
+ s.sendall(c.send(h11.Data(data=info.encode("ascii"))))
+ s.sendall(c.send(h11.EndOfMessage()))
+
+
+def test_h11_as_server() -> None:
+ with socket_server(H11RequestHandler) as httpd:
+ host, port = httpd.server_address
+ url = "http://{}:{}/some-path".format(host, port)
+ with closing(urlopen(url)) as f:
+ assert f.getcode() == 200
+ data = f.read()
+ info = json.loads(data.decode("ascii"))
+ print(info)
+ assert info["method"] == "GET"
+ assert info["target"] == "/some-path"
+ assert "urllib" in info["headers"]["user-agent"]
diff --git a/.venv/Lib/site-packages/h11/tests/test_connection.py b/.venv/Lib/site-packages/h11/tests/test_connection.py
new file mode 100644
index 0000000..73a27b9
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/tests/test_connection.py
@@ -0,0 +1,1122 @@
+from typing import Any, cast, Dict, List, Optional, Tuple, Type
+
+import pytest
+
+from .._connection import _body_framing, _keep_alive, Connection, NEED_DATA, PAUSED
+from .._events import (
+ ConnectionClosed,
+ Data,
+ EndOfMessage,
+ Event,
+ InformationalResponse,
+ Request,
+ Response,
+)
+from .._state import (
+ CLIENT,
+ CLOSED,
+ DONE,
+ ERROR,
+ IDLE,
+ MIGHT_SWITCH_PROTOCOL,
+ MUST_CLOSE,
+ SEND_BODY,
+ SEND_RESPONSE,
+ SERVER,
+ SWITCHED_PROTOCOL,
+)
+from .._util import LocalProtocolError, RemoteProtocolError, Sentinel
+from .helpers import ConnectionPair, get_all_events, receive_and_get
+
+
+def test__keep_alive() -> None:
+ assert _keep_alive(
+ Request(method="GET", target="/", headers=[("Host", "Example.com")])
+ )
+ assert not _keep_alive(
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "Example.com"), ("Connection", "close")],
+ )
+ )
+ assert not _keep_alive(
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "Example.com"), ("Connection", "a, b, cLOse, foo")],
+ )
+ )
+ assert not _keep_alive(
+ Request(method="GET", target="/", headers=[], http_version="1.0") # type: ignore[arg-type]
+ )
+
+ assert _keep_alive(Response(status_code=200, headers=[])) # type: ignore[arg-type]
+ assert not _keep_alive(Response(status_code=200, headers=[("Connection", "close")]))
+ assert not _keep_alive(
+ Response(status_code=200, headers=[("Connection", "a, b, cLOse, foo")])
+ )
+ assert not _keep_alive(Response(status_code=200, headers=[], http_version="1.0")) # type: ignore[arg-type]
+
+
+def test__body_framing() -> None:
+ def headers(cl: Optional[int], te: bool) -> List[Tuple[str, str]]:
+ headers = []
+ if cl is not None:
+ headers.append(("Content-Length", str(cl)))
+ if te:
+ headers.append(("Transfer-Encoding", "chunked"))
+ return headers
+
+ def resp(
+ status_code: int = 200, cl: Optional[int] = None, te: bool = False
+ ) -> Response:
+ return Response(status_code=status_code, headers=headers(cl, te))
+
+ def req(cl: Optional[int] = None, te: bool = False) -> Request:
+ h = headers(cl, te)
+ h += [("Host", "example.com")]
+ return Request(method="GET", target="/", headers=h)
+
+ # Special cases where the headers are ignored:
+ for kwargs in [{}, {"cl": 100}, {"te": True}, {"cl": 100, "te": True}]:
+ kwargs = cast(Dict[str, Any], kwargs)
+ for meth, r in [
+ (b"HEAD", resp(**kwargs)),
+ (b"GET", resp(status_code=204, **kwargs)),
+ (b"GET", resp(status_code=304, **kwargs)),
+ ]:
+ assert _body_framing(meth, r) == ("content-length", (0,))
+
+ # Transfer-encoding
+ for kwargs in [{"te": True}, {"cl": 100, "te": True}]:
+ kwargs = cast(Dict[str, Any], kwargs)
+ for meth, r in [(None, req(**kwargs)), (b"GET", resp(**kwargs))]: # type: ignore
+ assert _body_framing(meth, r) == ("chunked", ())
+
+ # Content-Length
+ for meth, r in [(None, req(cl=100)), (b"GET", resp(cl=100))]: # type: ignore
+ assert _body_framing(meth, r) == ("content-length", (100,))
+
+ # No headers
+ assert _body_framing(None, req()) == ("content-length", (0,)) # type: ignore
+ assert _body_framing(b"GET", resp()) == ("http/1.0", ())
+
+
+def test_Connection_basics_and_content_length() -> None:
+ with pytest.raises(ValueError):
+ Connection("CLIENT") # type: ignore
+
+ p = ConnectionPair()
+ assert p.conn[CLIENT].our_role is CLIENT
+ assert p.conn[CLIENT].their_role is SERVER
+ assert p.conn[SERVER].our_role is SERVER
+ assert p.conn[SERVER].their_role is CLIENT
+
+ data = p.send(
+ CLIENT,
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "example.com"), ("Content-Length", "10")],
+ ),
+ )
+ assert data == (
+ b"GET / HTTP/1.1\r\n" b"Host: example.com\r\n" b"Content-Length: 10\r\n\r\n"
+ )
+
+ for conn in p.conns:
+ assert conn.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
+ assert p.conn[CLIENT].our_state is SEND_BODY
+ assert p.conn[CLIENT].their_state is SEND_RESPONSE
+ assert p.conn[SERVER].our_state is SEND_RESPONSE
+ assert p.conn[SERVER].their_state is SEND_BODY
+
+ assert p.conn[CLIENT].their_http_version is None
+ assert p.conn[SERVER].their_http_version == b"1.1"
+
+ data = p.send(SERVER, InformationalResponse(status_code=100, headers=[])) # type: ignore[arg-type]
+ assert data == b"HTTP/1.1 100 \r\n\r\n"
+
+ data = p.send(SERVER, Response(status_code=200, headers=[("Content-Length", "11")]))
+ assert data == b"HTTP/1.1 200 \r\nContent-Length: 11\r\n\r\n"
+
+ for conn in p.conns:
+ assert conn.states == {CLIENT: SEND_BODY, SERVER: SEND_BODY}
+
+ assert p.conn[CLIENT].their_http_version == b"1.1"
+ assert p.conn[SERVER].their_http_version == b"1.1"
+
+ data = p.send(CLIENT, Data(data=b"12345"))
+ assert data == b"12345"
+ data = p.send(
+ CLIENT, Data(data=b"67890"), expect=[Data(data=b"67890"), EndOfMessage()]
+ )
+ assert data == b"67890"
+ data = p.send(CLIENT, EndOfMessage(), expect=[])
+ assert data == b""
+
+ for conn in p.conns:
+ assert conn.states == {CLIENT: DONE, SERVER: SEND_BODY}
+
+ data = p.send(SERVER, Data(data=b"1234567890"))
+ assert data == b"1234567890"
+ data = p.send(SERVER, Data(data=b"1"), expect=[Data(data=b"1"), EndOfMessage()])
+ assert data == b"1"
+ data = p.send(SERVER, EndOfMessage(), expect=[])
+ assert data == b""
+
+ for conn in p.conns:
+ assert conn.states == {CLIENT: DONE, SERVER: DONE}
+
+
+def test_chunked() -> None:
+ p = ConnectionPair()
+
+ p.send(
+ CLIENT,
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "example.com"), ("Transfer-Encoding", "chunked")],
+ ),
+ )
+ data = p.send(CLIENT, Data(data=b"1234567890", chunk_start=True, chunk_end=True))
+ assert data == b"a\r\n1234567890\r\n"
+ data = p.send(CLIENT, Data(data=b"abcde", chunk_start=True, chunk_end=True))
+ assert data == b"5\r\nabcde\r\n"
+ data = p.send(CLIENT, Data(data=b""), expect=[])
+ assert data == b""
+ data = p.send(CLIENT, EndOfMessage(headers=[("hello", "there")]))
+ assert data == b"0\r\nhello: there\r\n\r\n"
+
+ p.send(
+ SERVER, Response(status_code=200, headers=[("Transfer-Encoding", "chunked")])
+ )
+ p.send(SERVER, Data(data=b"54321", chunk_start=True, chunk_end=True))
+ p.send(SERVER, Data(data=b"12345", chunk_start=True, chunk_end=True))
+ p.send(SERVER, EndOfMessage())
+
+ for conn in p.conns:
+ assert conn.states == {CLIENT: DONE, SERVER: DONE}
+
+
+def test_chunk_boundaries() -> None:
+ conn = Connection(our_role=SERVER)
+
+ request = (
+ b"POST / HTTP/1.1\r\n"
+ b"Host: example.com\r\n"
+ b"Transfer-Encoding: chunked\r\n"
+ b"\r\n"
+ )
+ conn.receive_data(request)
+ assert conn.next_event() == Request(
+ method="POST",
+ target="/",
+ headers=[("Host", "example.com"), ("Transfer-Encoding", "chunked")],
+ )
+ assert conn.next_event() is NEED_DATA
+
+ conn.receive_data(b"5\r\nhello\r\n")
+ assert conn.next_event() == Data(data=b"hello", chunk_start=True, chunk_end=True)
+
+ conn.receive_data(b"5\r\nhel")
+ assert conn.next_event() == Data(data=b"hel", chunk_start=True, chunk_end=False)
+
+ conn.receive_data(b"l")
+ assert conn.next_event() == Data(data=b"l", chunk_start=False, chunk_end=False)
+
+ conn.receive_data(b"o\r\n")
+ assert conn.next_event() == Data(data=b"o", chunk_start=False, chunk_end=True)
+
+ conn.receive_data(b"5\r\nhello")
+ assert conn.next_event() == Data(data=b"hello", chunk_start=True, chunk_end=True)
+
+ conn.receive_data(b"\r\n")
+ assert conn.next_event() == NEED_DATA
+
+ conn.receive_data(b"0\r\n\r\n")
+ assert conn.next_event() == EndOfMessage()
+
+
+def test_client_talking_to_http10_server() -> None:
+ c = Connection(CLIENT)
+ c.send(Request(method="GET", target="/", headers=[("Host", "example.com")]))
+ c.send(EndOfMessage())
+ assert c.our_state is DONE
+ # No content-length, so Http10 framing for body
+ assert receive_and_get(c, b"HTTP/1.0 200 OK\r\n\r\n") == [
+ Response(status_code=200, headers=[], http_version="1.0", reason=b"OK") # type: ignore[arg-type]
+ ]
+ assert c.our_state is MUST_CLOSE
+ assert receive_and_get(c, b"12345") == [Data(data=b"12345")]
+ assert receive_and_get(c, b"67890") == [Data(data=b"67890")]
+ assert receive_and_get(c, b"") == [EndOfMessage(), ConnectionClosed()]
+ assert c.their_state is CLOSED
+
+
+def test_server_talking_to_http10_client() -> None:
+ c = Connection(SERVER)
+ # No content-length, so no body
+ # NB: no host header
+ assert receive_and_get(c, b"GET / HTTP/1.0\r\n\r\n") == [
+ Request(method="GET", target="/", headers=[], http_version="1.0"), # type: ignore[arg-type]
+ EndOfMessage(),
+ ]
+ assert c.their_state is MUST_CLOSE
+
+ # We automatically Connection: close back at them
+ assert (
+ c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type]
+ == b"HTTP/1.1 200 \r\nConnection: close\r\n\r\n"
+ )
+
+ assert c.send(Data(data=b"12345")) == b"12345"
+ assert c.send(EndOfMessage()) == b""
+ assert c.our_state is MUST_CLOSE
+
+ # Check that it works if they do send Content-Length
+ c = Connection(SERVER)
+ # NB: no host header
+ assert receive_and_get(c, b"POST / HTTP/1.0\r\nContent-Length: 10\r\n\r\n1") == [
+ Request(
+ method="POST",
+ target="/",
+ headers=[("Content-Length", "10")],
+ http_version="1.0",
+ ),
+ Data(data=b"1"),
+ ]
+ assert receive_and_get(c, b"234567890") == [Data(data=b"234567890"), EndOfMessage()]
+ assert c.their_state is MUST_CLOSE
+ assert receive_and_get(c, b"") == [ConnectionClosed()]
+
+
+def test_automatic_transfer_encoding_in_response() -> None:
+ # Check that in responses, the user can specify either Transfer-Encoding:
+ # chunked or no framing at all, and in both cases we automatically select
+ # the right option depending on whether the peer speaks HTTP/1.0 or
+ # HTTP/1.1
+ for user_headers in [
+ [("Transfer-Encoding", "chunked")],
+ [],
+ # In fact, this even works if Content-Length is set,
+ # because if both are set then Transfer-Encoding wins
+ [("Transfer-Encoding", "chunked"), ("Content-Length", "100")],
+ ]:
+ user_headers = cast(List[Tuple[str, str]], user_headers)
+ p = ConnectionPair()
+ p.send(
+ CLIENT,
+ [
+ Request(method="GET", target="/", headers=[("Host", "example.com")]),
+ EndOfMessage(),
+ ],
+ )
+ # When speaking to HTTP/1.1 client, all of the above cases get
+ # normalized to Transfer-Encoding: chunked
+ p.send(
+ SERVER,
+ Response(status_code=200, headers=user_headers),
+ expect=Response(
+ status_code=200, headers=[("Transfer-Encoding", "chunked")]
+ ),
+ )
+
+ # When speaking to HTTP/1.0 client, all of the above cases get
+ # normalized to no-framing-headers
+ c = Connection(SERVER)
+ receive_and_get(c, b"GET / HTTP/1.0\r\n\r\n")
+ assert (
+ c.send(Response(status_code=200, headers=user_headers))
+ == b"HTTP/1.1 200 \r\nConnection: close\r\n\r\n"
+ )
+ assert c.send(Data(data=b"12345")) == b"12345"
+
+
+def test_automagic_connection_close_handling() -> None:
+ p = ConnectionPair()
+ # If the user explicitly sets Connection: close, then we notice and
+ # respect it
+ p.send(
+ CLIENT,
+ [
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "example.com"), ("Connection", "close")],
+ ),
+ EndOfMessage(),
+ ],
+ )
+ for conn in p.conns:
+ assert conn.states[CLIENT] is MUST_CLOSE
+ # And if the client sets it, the server automatically echoes it back
+ p.send(
+ SERVER,
+ # no header here...
+ [Response(status_code=204, headers=[]), EndOfMessage()], # type: ignore[arg-type]
+ # ...but oh look, it arrived anyway
+ expect=[
+ Response(status_code=204, headers=[("connection", "close")]),
+ EndOfMessage(),
+ ],
+ )
+ for conn in p.conns:
+ assert conn.states == {CLIENT: MUST_CLOSE, SERVER: MUST_CLOSE}
+
+
+def test_100_continue() -> None:
+ def setup() -> ConnectionPair:
+ p = ConnectionPair()
+ p.send(
+ CLIENT,
+ Request(
+ method="GET",
+ target="/",
+ headers=[
+ ("Host", "example.com"),
+ ("Content-Length", "100"),
+ ("Expect", "100-continue"),
+ ],
+ ),
+ )
+ for conn in p.conns:
+ assert conn.client_is_waiting_for_100_continue
+ assert not p.conn[CLIENT].they_are_waiting_for_100_continue
+ assert p.conn[SERVER].they_are_waiting_for_100_continue
+ return p
+
+ # Disabled by 100 Continue
+ p = setup()
+ p.send(SERVER, InformationalResponse(status_code=100, headers=[])) # type: ignore[arg-type]
+ for conn in p.conns:
+ assert not conn.client_is_waiting_for_100_continue
+ assert not conn.they_are_waiting_for_100_continue
+
+ # Disabled by a real response
+ p = setup()
+ p.send(
+ SERVER, Response(status_code=200, headers=[("Transfer-Encoding", "chunked")])
+ )
+ for conn in p.conns:
+ assert not conn.client_is_waiting_for_100_continue
+ assert not conn.they_are_waiting_for_100_continue
+
+ # Disabled by the client going ahead and sending stuff anyway
+ p = setup()
+ p.send(CLIENT, Data(data=b"12345"))
+ for conn in p.conns:
+ assert not conn.client_is_waiting_for_100_continue
+ assert not conn.they_are_waiting_for_100_continue
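+
+
+# Sketch of the server-side call site for the flag asserted above: before
+# reading the request body, a server can honor Expect: 100-continue like
+# this (`sock` is a hypothetical connected socket; error handling elided).
+def maybe_send_100_continue(c: Connection, sock) -> None:
+    if c.they_are_waiting_for_100_continue:
+        data = c.send(InformationalResponse(status_code=100, headers=[]))
+        sock.sendall(data)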
+
+
+def test_max_incomplete_event_size_countermeasure() -> None:
+ # Infinitely long headers are definitely not okay
+ c = Connection(SERVER)
+ c.receive_data(b"GET / HTTP/1.0\r\nEndless: ")
+ assert c.next_event() is NEED_DATA
+ with pytest.raises(RemoteProtocolError):
+ while True:
+ c.receive_data(b"a" * 1024)
+ c.next_event()
+
+ # Checking that the same header is accepted / rejected depending on the
+ # max_incomplete_event_size setting:
+ c = Connection(SERVER, max_incomplete_event_size=5000)
+ c.receive_data(b"GET / HTTP/1.0\r\nBig: ")
+ c.receive_data(b"a" * 4000)
+ c.receive_data(b"\r\n\r\n")
+ assert get_all_events(c) == [
+ Request(
+ method="GET", target="/", http_version="1.0", headers=[("big", "a" * 4000)]
+ ),
+ EndOfMessage(),
+ ]
+
+ c = Connection(SERVER, max_incomplete_event_size=4000)
+ c.receive_data(b"GET / HTTP/1.0\r\nBig: ")
+ c.receive_data(b"a" * 4000)
+ with pytest.raises(RemoteProtocolError):
+ c.next_event()
+
+    # Temporarily exceeding the size limit is fine, as long as it's done with
+    # complete events:
+ c = Connection(SERVER, max_incomplete_event_size=5000)
+ c.receive_data(b"GET / HTTP/1.0\r\nContent-Length: 10000")
+ c.receive_data(b"\r\n\r\n" + b"a" * 10000)
+ assert get_all_events(c) == [
+ Request(
+ method="GET",
+ target="/",
+ http_version="1.0",
+ headers=[("Content-Length", "10000")],
+ ),
+ Data(data=b"a" * 10000),
+ EndOfMessage(),
+ ]
+
+ c = Connection(SERVER, max_incomplete_event_size=100)
+ # Two pipelined requests to create a way-too-big receive buffer... but
+ # it's fine because we're not checking
+ c.receive_data(
+ b"GET /1 HTTP/1.1\r\nHost: a\r\n\r\n"
+ b"GET /2 HTTP/1.1\r\nHost: b\r\n\r\n" + b"X" * 1000
+ )
+ assert get_all_events(c) == [
+ Request(method="GET", target="/1", headers=[("host", "a")]),
+ EndOfMessage(),
+ ]
+ # Even more data comes in, still no problem
+ c.receive_data(b"X" * 1000)
+ # We can respond and reuse to get the second pipelined request
+ c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type]
+ c.send(EndOfMessage())
+ c.start_next_cycle()
+ assert get_all_events(c) == [
+ Request(method="GET", target="/2", headers=[("host", "b")]),
+ EndOfMessage(),
+ ]
+ # But once we unpause and try to read the next message, and find that it's
+ # incomplete and the buffer is *still* way too large, then *that's* a
+ # problem:
+ c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type]
+ c.send(EndOfMessage())
+ c.start_next_cycle()
+ with pytest.raises(RemoteProtocolError):
+ c.next_event()
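+
+
+# The limit above exists to protect receive loops like this hedged sketch,
+# where a hostile peer could otherwise grow the incomplete-event buffer
+# without bound (`sock` is a hypothetical connected socket):
+def next_event_blocking(c: Connection, sock):
+    while True:
+        event = c.next_event()
+        if event is NEED_DATA:
+            # Bounded: once an unfinished event exceeds
+            # max_incomplete_event_size, h11 raises RemoteProtocolError
+            # instead of buffering forever.
+            c.receive_data(sock.recv(4096))
+        else:
+            return event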
+
+
+def test_reuse_simple() -> None:
+ p = ConnectionPair()
+ p.send(
+ CLIENT,
+ [Request(method="GET", target="/", headers=[("Host", "a")]), EndOfMessage()],
+ )
+ p.send(
+ SERVER,
+ [
+ Response(status_code=200, headers=[(b"transfer-encoding", b"chunked")]),
+ EndOfMessage(),
+ ],
+ )
+ for conn in p.conns:
+ assert conn.states == {CLIENT: DONE, SERVER: DONE}
+ conn.start_next_cycle()
+
+ p.send(
+ CLIENT,
+ [
+ Request(method="DELETE", target="/foo", headers=[("Host", "a")]),
+ EndOfMessage(),
+ ],
+ )
+ p.send(
+ SERVER,
+ [
+ Response(status_code=404, headers=[(b"transfer-encoding", b"chunked")]),
+ EndOfMessage(),
+ ],
+ )
+
+
+def test_pipelining() -> None:
+ # Client doesn't support pipelining, so we have to do this by hand
+ c = Connection(SERVER)
+ assert c.next_event() is NEED_DATA
+ # 3 requests all bunched up
+ c.receive_data(
+ b"GET /1 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n"
+ b"12345"
+ b"GET /2 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n"
+ b"67890"
+ b"GET /3 HTTP/1.1\r\nHost: a.com\r\n\r\n"
+ )
+ assert get_all_events(c) == [
+ Request(
+ method="GET",
+ target="/1",
+ headers=[("Host", "a.com"), ("Content-Length", "5")],
+ ),
+ Data(data=b"12345"),
+ EndOfMessage(),
+ ]
+ assert c.their_state is DONE
+ assert c.our_state is SEND_RESPONSE
+
+ assert c.next_event() is PAUSED
+
+ c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type]
+ c.send(EndOfMessage())
+ assert c.their_state is DONE
+ assert c.our_state is DONE
+
+ c.start_next_cycle()
+
+ assert get_all_events(c) == [
+ Request(
+ method="GET",
+ target="/2",
+ headers=[("Host", "a.com"), ("Content-Length", "5")],
+ ),
+ Data(data=b"67890"),
+ EndOfMessage(),
+ ]
+ assert c.next_event() is PAUSED
+ c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type]
+ c.send(EndOfMessage())
+ c.start_next_cycle()
+
+ assert get_all_events(c) == [
+ Request(method="GET", target="/3", headers=[("Host", "a.com")]),
+ EndOfMessage(),
+ ]
+ # Doesn't pause this time, no trailing data
+ assert c.next_event() is NEED_DATA
+ c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type]
+ c.send(EndOfMessage())
+
+ # Arrival of more data triggers pause
+ assert c.next_event() is NEED_DATA
+ c.receive_data(b"SADF")
+ assert c.next_event() is PAUSED
+ assert c.trailing_data == (b"SADF", False)
+ # If EOF arrives while paused, we don't see that either:
+ c.receive_data(b"")
+ assert c.trailing_data == (b"SADF", True)
+ assert c.next_event() is PAUSED
+ c.receive_data(b"")
+ assert c.next_event() is PAUSED
+ # Can't call receive_data with non-empty buf after closing it
+ with pytest.raises(RuntimeError):
+ c.receive_data(b"FDSA")
+
+
+def test_protocol_switch() -> None:
+ for (req, deny, accept) in [
+ (
+ Request(
+ method="CONNECT",
+ target="example.com:443",
+ headers=[("Host", "foo"), ("Content-Length", "1")],
+ ),
+ Response(status_code=404, headers=[(b"transfer-encoding", b"chunked")]),
+ Response(status_code=200, headers=[(b"transfer-encoding", b"chunked")]),
+ ),
+ (
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "foo"), ("Content-Length", "1"), ("Upgrade", "a, b")],
+ ),
+ Response(status_code=200, headers=[(b"transfer-encoding", b"chunked")]),
+ InformationalResponse(status_code=101, headers=[("Upgrade", "a")]),
+ ),
+ (
+ Request(
+ method="CONNECT",
+ target="example.com:443",
+ headers=[("Host", "foo"), ("Content-Length", "1"), ("Upgrade", "a, b")],
+ ),
+ Response(status_code=404, headers=[(b"transfer-encoding", b"chunked")]),
+ # Accept CONNECT, not upgrade
+ Response(status_code=200, headers=[(b"transfer-encoding", b"chunked")]),
+ ),
+ (
+ Request(
+ method="CONNECT",
+ target="example.com:443",
+ headers=[("Host", "foo"), ("Content-Length", "1"), ("Upgrade", "a, b")],
+ ),
+ Response(status_code=404, headers=[(b"transfer-encoding", b"chunked")]),
+ # Accept Upgrade, not CONNECT
+ InformationalResponse(status_code=101, headers=[("Upgrade", "b")]),
+ ),
+ ]:
+
+ def setup() -> ConnectionPair:
+ p = ConnectionPair()
+ p.send(CLIENT, req)
+ # No switch-related state change stuff yet; the client has to
+ # finish the request before that kicks in
+ for conn in p.conns:
+ assert conn.states[CLIENT] is SEND_BODY
+ p.send(CLIENT, [Data(data=b"1"), EndOfMessage()])
+ for conn in p.conns:
+ assert conn.states[CLIENT] is MIGHT_SWITCH_PROTOCOL
+ assert p.conn[SERVER].next_event() is PAUSED
+ return p
+
+ # Test deny case
+ p = setup()
+ p.send(SERVER, deny)
+ for conn in p.conns:
+ assert conn.states == {CLIENT: DONE, SERVER: SEND_BODY}
+ p.send(SERVER, EndOfMessage())
+ # Check that re-use is still allowed after a denial
+ for conn in p.conns:
+ conn.start_next_cycle()
+
+ # Test accept case
+ p = setup()
+ p.send(SERVER, accept)
+ for conn in p.conns:
+ assert conn.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL}
+ conn.receive_data(b"123")
+ assert conn.next_event() is PAUSED
+ conn.receive_data(b"456")
+ assert conn.next_event() is PAUSED
+ assert conn.trailing_data == (b"123456", False)
+
+ # Pausing in might-switch, then recovery
+ # (weird artificial case where the trailing data actually is valid
+ # HTTP for some reason, because this makes it easier to test the state
+ # logic)
+ p = setup()
+ sc = p.conn[SERVER]
+ sc.receive_data(b"GET / HTTP/1.0\r\n\r\n")
+ assert sc.next_event() is PAUSED
+ assert sc.trailing_data == (b"GET / HTTP/1.0\r\n\r\n", False)
+ sc.send(deny)
+ assert sc.next_event() is PAUSED
+ sc.send(EndOfMessage())
+ sc.start_next_cycle()
+ assert get_all_events(sc) == [
+ Request(method="GET", target="/", headers=[], http_version="1.0"), # type: ignore[arg-type]
+ EndOfMessage(),
+ ]
+
+ # When we're DONE, have no trailing data, and the connection gets
+ # closed, we report ConnectionClosed(). When we're in might-switch or
+ # switched, we don't.
+ p = setup()
+ sc = p.conn[SERVER]
+ sc.receive_data(b"")
+ assert sc.next_event() is PAUSED
+ assert sc.trailing_data == (b"", True)
+ p.send(SERVER, accept)
+ assert sc.next_event() is PAUSED
+
+ p = setup()
+ sc = p.conn[SERVER]
+ sc.receive_data(b"")
+ assert sc.next_event() is PAUSED
+ sc.send(deny)
+ assert sc.next_event() == ConnectionClosed()
+
+ # You can't send after switching protocols, or while waiting for a
+ # protocol switch
+ p = setup()
+ with pytest.raises(LocalProtocolError):
+ p.conn[CLIENT].send(
+ Request(method="GET", target="/", headers=[("Host", "a")])
+ )
+ p = setup()
+ p.send(SERVER, accept)
+ with pytest.raises(LocalProtocolError):
+ p.conn[SERVER].send(Data(data=b"123"))
+
+
+def test_close_simple() -> None:
+ # Just immediately closing a new connection without anything having
+ # happened yet.
+ for (who_shot_first, who_shot_second) in [(CLIENT, SERVER), (SERVER, CLIENT)]:
+
+ def setup() -> ConnectionPair:
+ p = ConnectionPair()
+ p.send(who_shot_first, ConnectionClosed())
+ for conn in p.conns:
+ assert conn.states == {
+ who_shot_first: CLOSED,
+ who_shot_second: MUST_CLOSE,
+ }
+ return p
+
+ # You can keep putting b"" into a closed connection, and you keep
+ # getting ConnectionClosed() out:
+ p = setup()
+ assert p.conn[who_shot_second].next_event() == ConnectionClosed()
+ assert p.conn[who_shot_second].next_event() == ConnectionClosed()
+ p.conn[who_shot_second].receive_data(b"")
+ assert p.conn[who_shot_second].next_event() == ConnectionClosed()
+ # Second party can close...
+ p = setup()
+ p.send(who_shot_second, ConnectionClosed())
+ for conn in p.conns:
+ assert conn.our_state is CLOSED
+ assert conn.their_state is CLOSED
+ # But trying to receive new data on a closed connection is a
+ # RuntimeError (not ProtocolError, because the problem here isn't
+ # violation of HTTP, it's violation of physics)
+ p = setup()
+ with pytest.raises(RuntimeError):
+ p.conn[who_shot_second].receive_data(b"123")
+ # And receiving new data on a MUST_CLOSE connection is a ProtocolError
+ p = setup()
+ p.conn[who_shot_first].receive_data(b"GET")
+ with pytest.raises(RemoteProtocolError):
+ p.conn[who_shot_first].next_event()
+
+
+def test_close_different_states() -> None:
+ req = [
+ Request(method="GET", target="/foo", headers=[("Host", "a")]),
+ EndOfMessage(),
+ ]
+ resp = [
+ Response(status_code=200, headers=[(b"transfer-encoding", b"chunked")]),
+ EndOfMessage(),
+ ]
+
+ # Client before request
+ p = ConnectionPair()
+ p.send(CLIENT, ConnectionClosed())
+ for conn in p.conns:
+ assert conn.states == {CLIENT: CLOSED, SERVER: MUST_CLOSE}
+
+ # Client after request
+ p = ConnectionPair()
+ p.send(CLIENT, req)
+ p.send(CLIENT, ConnectionClosed())
+ for conn in p.conns:
+ assert conn.states == {CLIENT: CLOSED, SERVER: SEND_RESPONSE}
+
+ # Server after request -> not allowed
+ p = ConnectionPair()
+ p.send(CLIENT, req)
+ with pytest.raises(LocalProtocolError):
+ p.conn[SERVER].send(ConnectionClosed())
+ p.conn[CLIENT].receive_data(b"")
+ with pytest.raises(RemoteProtocolError):
+ p.conn[CLIENT].next_event()
+
+ # Server after response
+ p = ConnectionPair()
+ p.send(CLIENT, req)
+ p.send(SERVER, resp)
+ p.send(SERVER, ConnectionClosed())
+ for conn in p.conns:
+ assert conn.states == {CLIENT: MUST_CLOSE, SERVER: CLOSED}
+
+ # Both after closing (ConnectionClosed() is idempotent)
+ p = ConnectionPair()
+ p.send(CLIENT, req)
+ p.send(SERVER, resp)
+ p.send(CLIENT, ConnectionClosed())
+ p.send(SERVER, ConnectionClosed())
+ p.send(CLIENT, ConnectionClosed())
+ p.send(SERVER, ConnectionClosed())
+
+ # In the middle of sending -> not allowed
+ p = ConnectionPair()
+ p.send(
+ CLIENT,
+ Request(
+ method="GET", target="/", headers=[("Host", "a"), ("Content-Length", "10")]
+ ),
+ )
+ with pytest.raises(LocalProtocolError):
+ p.conn[CLIENT].send(ConnectionClosed())
+ p.conn[SERVER].receive_data(b"")
+ with pytest.raises(RemoteProtocolError):
+ p.conn[SERVER].next_event()
+
+
+# Receive several requests and then client shuts down their side of the
+# connection; we can respond to each
+def test_pipelined_close() -> None:
+ c = Connection(SERVER)
+ # 2 requests then a close
+ c.receive_data(
+ b"GET /1 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n"
+ b"12345"
+ b"GET /2 HTTP/1.1\r\nHost: a.com\r\nContent-Length: 5\r\n\r\n"
+ b"67890"
+ )
+ c.receive_data(b"")
+ assert get_all_events(c) == [
+ Request(
+ method="GET",
+ target="/1",
+ headers=[("host", "a.com"), ("content-length", "5")],
+ ),
+ Data(data=b"12345"),
+ EndOfMessage(),
+ ]
+ assert c.states[CLIENT] is DONE
+ c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type]
+ c.send(EndOfMessage())
+ assert c.states[SERVER] is DONE
+ c.start_next_cycle()
+ assert get_all_events(c) == [
+ Request(
+ method="GET",
+ target="/2",
+ headers=[("host", "a.com"), ("content-length", "5")],
+ ),
+ Data(data=b"67890"),
+ EndOfMessage(),
+ ConnectionClosed(),
+ ]
+ assert c.states == {CLIENT: CLOSED, SERVER: SEND_RESPONSE}
+ c.send(Response(status_code=200, headers=[])) # type: ignore[arg-type]
+ c.send(EndOfMessage())
+ assert c.states == {CLIENT: CLOSED, SERVER: MUST_CLOSE}
+ c.send(ConnectionClosed())
+ assert c.states == {CLIENT: CLOSED, SERVER: CLOSED}
+
+
+def test_sendfile() -> None:
+ class SendfilePlaceholder:
+ def __len__(self) -> int:
+ return 10
+
+ placeholder = SendfilePlaceholder()
+
+ def setup(
+ header: Tuple[str, str], http_version: str
+ ) -> Tuple[Connection, Optional[List[bytes]]]:
+ c = Connection(SERVER)
+ receive_and_get(
+ c, "GET / HTTP/{}\r\nHost: a\r\n\r\n".format(http_version).encode("ascii")
+ )
+ headers = []
+ if header:
+ headers.append(header)
+ c.send(Response(status_code=200, headers=headers))
+ return c, c.send_with_data_passthrough(Data(data=placeholder)) # type: ignore
+
+ c, data = setup(("Content-Length", "10"), "1.1")
+ assert data == [placeholder] # type: ignore
+ # Raises an error if the connection object doesn't think we've sent
+ # exactly 10 bytes
+ c.send(EndOfMessage())
+
+ _, data = setup(("Transfer-Encoding", "chunked"), "1.1")
+ assert placeholder in data # type: ignore
+ data[data.index(placeholder)] = b"x" * 10 # type: ignore
+ assert b"".join(data) == b"a\r\nxxxxxxxxxx\r\n" # type: ignore
+
+ c, data = setup(None, "1.0") # type: ignore
+ assert data == [placeholder] # type: ignore
+ assert c.our_state is SEND_BODY
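+
+
+# Hedged sketch of what send_with_data_passthrough is for, matching the
+# placeholder trick above: h11 emits the framing as real bytes and passes the
+# placeholder through untouched, so the caller can choose a zero-copy path
+# for it (`sock` is a hypothetical connected socket).
+def write_event(c: Connection, sock, event) -> None:
+    for item in c.send_with_data_passthrough(event) or []:
+        if isinstance(item, bytes):
+            sock.sendall(item)
+        else:
+            ...  # e.g. os.sendfile() for the placeholder's underlying file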
+
+
+def test_errors() -> None:
+ # After a receive error, you can't receive
+ for role in [CLIENT, SERVER]:
+ c = Connection(our_role=role)
+ c.receive_data(b"gibberish\r\n\r\n")
+ with pytest.raises(RemoteProtocolError):
+ c.next_event()
+ # Now any attempt to receive continues to raise
+ assert c.their_state is ERROR
+ assert c.our_state is not ERROR
+ print(c._cstate.states)
+ with pytest.raises(RemoteProtocolError):
+ c.next_event()
+ # But we can still yell at the client for sending us gibberish
+ if role is SERVER:
+ assert (
+ c.send(Response(status_code=400, headers=[])) # type: ignore[arg-type]
+ == b"HTTP/1.1 400 \r\nConnection: close\r\n\r\n"
+ )
+
+ # After an error sending, you can no longer send
+ # (This is especially important for things like content-length errors,
+ # where there's complex internal state being modified)
+ def conn(role: Type[Sentinel]) -> Connection:
+ c = Connection(our_role=role)
+ if role is SERVER:
+ # Put it into the state where it *could* send a response...
+ receive_and_get(c, b"GET / HTTP/1.0\r\n\r\n")
+ assert c.our_state is SEND_RESPONSE
+ return c
+
+ for role in [CLIENT, SERVER]:
+ if role is CLIENT:
+ # This HTTP/1.0 request won't be detected as bad until after we go
+ # through the state machine and hit the writing code
+ good = Request(method="GET", target="/", headers=[("Host", "example.com")])
+ bad = Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "example.com")],
+ http_version="1.0",
+ )
+ elif role is SERVER:
+ good = Response(status_code=200, headers=[]) # type: ignore[arg-type,assignment]
+ bad = Response(status_code=200, headers=[], http_version="1.0") # type: ignore[arg-type,assignment]
+ # Make sure 'good' actually is good
+ c = conn(role)
+ c.send(good)
+ assert c.our_state is not ERROR
+ # Do that again, but this time sending 'bad' first
+ c = conn(role)
+ with pytest.raises(LocalProtocolError):
+ c.send(bad)
+ assert c.our_state is ERROR
+ assert c.their_state is not ERROR
+ # Now 'good' is not so good
+ with pytest.raises(LocalProtocolError):
+ c.send(good)
+
+ # And check send_failed() too
+ c = conn(role)
+ c.send_failed()
+ assert c.our_state is ERROR
+ assert c.their_state is not ERROR
+ # This is idempotent
+ c.send_failed()
+ assert c.our_state is ERROR
+ assert c.their_state is not ERROR
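+
+
+# Sketch of the intended send_failed() call site tested above: if the bytes
+# that send() returned never actually reached the peer, our half of the
+# state machine is out of sync with reality, so the connection is poisoned.
+def checked_send(c: Connection, sock, event) -> None:
+    data = c.send(event)
+    try:
+        if data is not None:
+            sock.sendall(data)
+    except OSError:
+        c.send_failed()
+        raise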
+
+
+def test_idle_receive_nothing() -> None:
+ # At one point this incorrectly raised an error
+ for role in [CLIENT, SERVER]:
+ c = Connection(role)
+ assert c.next_event() is NEED_DATA
+
+
+def test_connection_drop() -> None:
+ c = Connection(SERVER)
+ c.receive_data(b"GET /")
+ assert c.next_event() is NEED_DATA
+ c.receive_data(b"")
+ with pytest.raises(RemoteProtocolError):
+ c.next_event()
+
+
+def test_408_request_timeout() -> None:
+ # Should be able to send this spontaneously as a server without seeing
+ # anything from client
+ p = ConnectionPair()
+ p.send(SERVER, Response(status_code=408, headers=[(b"connection", b"close")]))
+
+
+# This used to raise IndexError
+def test_empty_request() -> None:
+ c = Connection(SERVER)
+ c.receive_data(b"\r\n")
+ with pytest.raises(RemoteProtocolError):
+ c.next_event()
+
+
+# This used to raise IndexError
+def test_empty_response() -> None:
+ c = Connection(CLIENT)
+ c.send(Request(method="GET", target="/", headers=[("Host", "a")]))
+ c.receive_data(b"\r\n")
+ with pytest.raises(RemoteProtocolError):
+ c.next_event()
+
+
+@pytest.mark.parametrize(
+ "data",
+ [
+ b"\x00",
+ b"\x20",
+ b"\x16\x03\x01\x00\xa5", # Typical start of a TLS Client Hello
+ ],
+)
+def test_early_detection_of_invalid_request(data: bytes) -> None:
+ c = Connection(SERVER)
+ # Early detection should occur before even receiving a `\r\n`
+ c.receive_data(data)
+ with pytest.raises(RemoteProtocolError):
+ c.next_event()
+
+
+@pytest.mark.parametrize(
+ "data",
+ [
+ b"\x00",
+ b"\x20",
+ b"\x16\x03\x03\x00\x31", # Typical start of a TLS Server Hello
+ ],
+)
+def test_early_detection_of_invalid_response(data: bytes) -> None:
+ c = Connection(CLIENT)
+ # Early detection should occur before even receiving a `\r\n`
+ c.receive_data(data)
+ with pytest.raises(RemoteProtocolError):
+ c.next_event()
+
+
+# This used to give different headers for HEAD and GET.
+# The correct way to handle HEAD is to put whatever headers we *would* have
+# put if it were a GET -- even though we know that for HEAD, those headers
+# will be ignored.
+def test_HEAD_framing_headers() -> None:
+ def setup(method: bytes, http_version: bytes) -> Connection:
+ c = Connection(SERVER)
+ c.receive_data(
+ method + b" / HTTP/" + http_version + b"\r\n" + b"Host: example.com\r\n\r\n"
+ )
+ assert type(c.next_event()) is Request
+ assert type(c.next_event()) is EndOfMessage
+ return c
+
+ for method in [b"GET", b"HEAD"]:
+ # No Content-Length, HTTP/1.1 peer, should use chunked
+ c = setup(method, b"1.1")
+ assert (
+ c.send(Response(status_code=200, headers=[])) == b"HTTP/1.1 200 \r\n" # type: ignore[arg-type]
+ b"Transfer-Encoding: chunked\r\n\r\n"
+ )
+
+ # No Content-Length, HTTP/1.0 peer, frame with connection: close
+ c = setup(method, b"1.0")
+ assert (
+ c.send(Response(status_code=200, headers=[])) == b"HTTP/1.1 200 \r\n" # type: ignore[arg-type]
+ b"Connection: close\r\n\r\n"
+ )
+
+ # Content-Length + Transfer-Encoding, TE wins
+ c = setup(method, b"1.1")
+ assert (
+ c.send(
+ Response(
+ status_code=200,
+ headers=[
+ ("Content-Length", "100"),
+ ("Transfer-Encoding", "chunked"),
+ ],
+ )
+ )
+ == b"HTTP/1.1 200 \r\n"
+ b"Transfer-Encoding: chunked\r\n\r\n"
+ )
+
+
+def test_special_exceptions_for_lost_connection_in_message_body() -> None:
+ c = Connection(SERVER)
+ c.receive_data(
+ b"POST / HTTP/1.1\r\n" b"Host: example.com\r\n" b"Content-Length: 100\r\n\r\n"
+ )
+ assert type(c.next_event()) is Request
+ assert c.next_event() is NEED_DATA
+ c.receive_data(b"12345")
+ assert c.next_event() == Data(data=b"12345")
+ c.receive_data(b"")
+ with pytest.raises(RemoteProtocolError) as excinfo:
+ c.next_event()
+ assert "received 5 bytes" in str(excinfo.value)
+ assert "expected 100" in str(excinfo.value)
+
+ c = Connection(SERVER)
+ c.receive_data(
+ b"POST / HTTP/1.1\r\n"
+ b"Host: example.com\r\n"
+ b"Transfer-Encoding: chunked\r\n\r\n"
+ )
+ assert type(c.next_event()) is Request
+ assert c.next_event() is NEED_DATA
+ c.receive_data(b"8\r\n012345")
+ assert c.next_event().data == b"012345" # type: ignore
+ c.receive_data(b"")
+ with pytest.raises(RemoteProtocolError) as excinfo:
+ c.next_event()
+ assert "incomplete chunked read" in str(excinfo.value)
diff --git a/.venv/Lib/site-packages/h11/tests/test_events.py b/.venv/Lib/site-packages/h11/tests/test_events.py
new file mode 100644
index 0000000..bc6c313
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/tests/test_events.py
@@ -0,0 +1,150 @@
+from http import HTTPStatus
+
+import pytest
+
+from .. import _events
+from .._events import (
+ ConnectionClosed,
+ Data,
+ EndOfMessage,
+ Event,
+ InformationalResponse,
+ Request,
+ Response,
+)
+from .._util import LocalProtocolError
+
+
+def test_events() -> None:
+ with pytest.raises(LocalProtocolError):
+ # Missing Host:
+ req = Request(
+ method="GET", target="/", headers=[("a", "b")], http_version="1.1"
+ )
+ # But this is okay (HTTP/1.0)
+ req = Request(method="GET", target="/", headers=[("a", "b")], http_version="1.0")
+ # fields are normalized
+ assert req.method == b"GET"
+ assert req.target == b"/"
+ assert req.headers == [(b"a", b"b")]
+ assert req.http_version == b"1.0"
+
+ # This is also okay -- has a Host (with weird capitalization, which is ok)
+ req = Request(
+ method="GET",
+ target="/",
+ headers=[("a", "b"), ("hOSt", "example.com")],
+ http_version="1.1",
+ )
+ # we normalize header capitalization
+ assert req.headers == [(b"a", b"b"), (b"host", b"example.com")]
+
+ # Multiple host is bad too
+ with pytest.raises(LocalProtocolError):
+ req = Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "a"), ("Host", "a")],
+ http_version="1.1",
+ )
+ # Even for HTTP/1.0
+ with pytest.raises(LocalProtocolError):
+ req = Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "a"), ("Host", "a")],
+ http_version="1.0",
+ )
+
+ # Header values are validated
+ for bad_char in "\x00\r\n\f\v":
+ with pytest.raises(LocalProtocolError):
+ req = Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "a"), ("Foo", "asd" + bad_char)],
+ http_version="1.0",
+ )
+
+ # But for compatibility we allow non-whitespace control characters, even
+ # though they're forbidden by the spec.
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "a"), ("Foo", "asd\x01\x02\x7f")],
+ http_version="1.0",
+ )
+
+ # Request target is validated
+ for bad_byte in b"\x00\x20\x7f\xee":
+ target = bytearray(b"/")
+ target.append(bad_byte)
+ with pytest.raises(LocalProtocolError):
+ Request(
+ method="GET", target=target, headers=[("Host", "a")], http_version="1.1"
+ )
+
+ # Request method is validated
+ with pytest.raises(LocalProtocolError):
+ Request(
+ method="GET / HTTP/1.1",
+ target=target,
+ headers=[("Host", "a")],
+ http_version="1.1",
+ )
+
+ ir = InformationalResponse(status_code=100, headers=[("Host", "a")])
+ assert ir.status_code == 100
+ assert ir.headers == [(b"host", b"a")]
+ assert ir.http_version == b"1.1"
+
+ with pytest.raises(LocalProtocolError):
+ InformationalResponse(status_code=200, headers=[("Host", "a")])
+
+ resp = Response(status_code=204, headers=[], http_version="1.0") # type: ignore[arg-type]
+ assert resp.status_code == 204
+ assert resp.headers == []
+ assert resp.http_version == b"1.0"
+
+ with pytest.raises(LocalProtocolError):
+ resp = Response(status_code=100, headers=[], http_version="1.0") # type: ignore[arg-type]
+
+ with pytest.raises(LocalProtocolError):
+ Response(status_code="100", headers=[], http_version="1.0") # type: ignore[arg-type]
+
+ with pytest.raises(LocalProtocolError):
+ InformationalResponse(status_code=b"100", headers=[], http_version="1.0") # type: ignore[arg-type]
+
+ d = Data(data=b"asdf")
+ assert d.data == b"asdf"
+
+ eom = EndOfMessage()
+ assert eom.headers == []
+
+ cc = ConnectionClosed()
+ assert repr(cc) == "ConnectionClosed()"
+
+
+def test_intenum_status_code() -> None:
+ # https://github.com/python-hyper/h11/issues/72
+
+ r = Response(status_code=HTTPStatus.OK, headers=[], http_version="1.0") # type: ignore[arg-type]
+ assert r.status_code == HTTPStatus.OK
+ assert type(r.status_code) is not type(HTTPStatus.OK)
+ assert type(r.status_code) is int
+
+
+def test_header_casing() -> None:
+ r = Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "example.org"), ("Connection", "keep-alive")],
+ http_version="1.1",
+ )
+ assert len(r.headers) == 2
+ assert r.headers[0] == (b"host", b"example.org")
+ assert r.headers == [(b"host", b"example.org"), (b"connection", b"keep-alive")]
+ assert r.headers.raw_items() == [
+ (b"Host", b"example.org"),
+ (b"Connection", b"keep-alive"),
+ ]
diff --git a/.venv/Lib/site-packages/h11/tests/test_headers.py b/.venv/Lib/site-packages/h11/tests/test_headers.py
new file mode 100644
index 0000000..ba53d08
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/tests/test_headers.py
@@ -0,0 +1,157 @@
+import pytest
+
+from .._events import Request
+from .._headers import (
+ get_comma_header,
+ has_expect_100_continue,
+ Headers,
+ normalize_and_validate,
+ set_comma_header,
+)
+from .._util import LocalProtocolError
+
+
+def test_normalize_and_validate() -> None:
+ assert normalize_and_validate([("foo", "bar")]) == [(b"foo", b"bar")]
+ assert normalize_and_validate([(b"foo", b"bar")]) == [(b"foo", b"bar")]
+
+ # no leading/trailing whitespace in names
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([(b"foo ", "bar")])
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([(b" foo", "bar")])
+
+ # no weird characters in names
+ with pytest.raises(LocalProtocolError) as excinfo:
+ normalize_and_validate([(b"foo bar", b"baz")])
+ assert "foo bar" in str(excinfo.value)
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([(b"foo\x00bar", b"baz")])
+ # Not even 8-bit characters:
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([(b"foo\xffbar", b"baz")])
+ # And not even the control characters we allow in values:
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([(b"foo\x01bar", b"baz")])
+
+ # no return or NUL characters in values
+ with pytest.raises(LocalProtocolError) as excinfo:
+ normalize_and_validate([("foo", "bar\rbaz")])
+ assert "bar\\rbaz" in str(excinfo.value)
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("foo", "bar\nbaz")])
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("foo", "bar\x00baz")])
+ # no leading/trailing whitespace
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("foo", "barbaz ")])
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("foo", " barbaz")])
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("foo", "barbaz\t")])
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("foo", "\tbarbaz")])
+
+ # content-length
+ assert normalize_and_validate([("Content-Length", "1")]) == [
+ (b"content-length", b"1")
+ ]
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("Content-Length", "asdf")])
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("Content-Length", "1x")])
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("Content-Length", "1"), ("Content-Length", "2")])
+ assert normalize_and_validate(
+ [("Content-Length", "0"), ("Content-Length", "0")]
+ ) == [(b"content-length", b"0")]
+ assert normalize_and_validate([("Content-Length", "0 , 0")]) == [
+ (b"content-length", b"0")
+ ]
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate(
+ [("Content-Length", "1"), ("Content-Length", "1"), ("Content-Length", "2")]
+ )
+ with pytest.raises(LocalProtocolError):
+ normalize_and_validate([("Content-Length", "1 , 1,2")])
+
+ # transfer-encoding
+ assert normalize_and_validate([("Transfer-Encoding", "chunked")]) == [
+ (b"transfer-encoding", b"chunked")
+ ]
+ assert normalize_and_validate([("Transfer-Encoding", "cHuNkEd")]) == [
+ (b"transfer-encoding", b"chunked")
+ ]
+ with pytest.raises(LocalProtocolError) as excinfo:
+ normalize_and_validate([("Transfer-Encoding", "gzip")])
+ assert excinfo.value.error_status_hint == 501 # Not Implemented
+ with pytest.raises(LocalProtocolError) as excinfo:
+ normalize_and_validate(
+ [("Transfer-Encoding", "chunked"), ("Transfer-Encoding", "gzip")]
+ )
+ assert excinfo.value.error_status_hint == 501 # Not Implemented
+
+
+def test_get_set_comma_header() -> None:
+ headers = normalize_and_validate(
+ [
+ ("Connection", "close"),
+ ("whatever", "something"),
+ ("connectiON", "fOo,, , BAR"),
+ ]
+ )
+
+ assert get_comma_header(headers, b"connection") == [b"close", b"foo", b"bar"]
+
+ headers = set_comma_header(headers, b"newthing", ["a", "b"]) # type: ignore
+
+ with pytest.raises(LocalProtocolError):
+ set_comma_header(headers, b"newthing", [" a", "b"]) # type: ignore
+
+ assert headers == [
+ (b"connection", b"close"),
+ (b"whatever", b"something"),
+ (b"connection", b"fOo,, , BAR"),
+ (b"newthing", b"a"),
+ (b"newthing", b"b"),
+ ]
+
+ headers = set_comma_header(headers, b"whatever", ["different thing"]) # type: ignore
+
+ assert headers == [
+ (b"connection", b"close"),
+ (b"connection", b"fOo,, , BAR"),
+ (b"newthing", b"a"),
+ (b"newthing", b"b"),
+ (b"whatever", b"different thing"),
+ ]
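+
+
+# A rough sketch of the behavior get_comma_header shows above -- not the
+# real implementation: collect every field with the given (lowercase) name,
+# lowercase and split the values on commas, strip whitespace, drop empties.
+def naive_comma_header(headers, name):
+    values = []
+    for found_name, found_value in headers:
+        if found_name == name:
+            values.extend(
+                v.strip() for v in found_value.lower().split(b",") if v.strip()
+            )
+    return values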
+
+
+def test_has_100_continue() -> None:
+ assert has_expect_100_continue(
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "example.com"), ("Expect", "100-continue")],
+ )
+ )
+ assert not has_expect_100_continue(
+ Request(method="GET", target="/", headers=[("Host", "example.com")])
+ )
+ # Case insensitive
+ assert has_expect_100_continue(
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "example.com"), ("Expect", "100-Continue")],
+ )
+ )
+ # Doesn't work in HTTP/1.0
+ assert not has_expect_100_continue(
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "example.com"), ("Expect", "100-continue")],
+ http_version="1.0",
+ )
+ )
diff --git a/.venv/Lib/site-packages/h11/tests/test_helpers.py b/.venv/Lib/site-packages/h11/tests/test_helpers.py
new file mode 100644
index 0000000..c329c76
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/tests/test_helpers.py
@@ -0,0 +1,32 @@
+from .._events import (
+ ConnectionClosed,
+ Data,
+ EndOfMessage,
+ Event,
+ InformationalResponse,
+ Request,
+ Response,
+)
+from .helpers import normalize_data_events
+
+
+def test_normalize_data_events() -> None:
+ assert normalize_data_events(
+ [
+ Data(data=bytearray(b"1")),
+ Data(data=b"2"),
+ Response(status_code=200, headers=[]), # type: ignore[arg-type]
+ Data(data=b"3"),
+ Data(data=b"4"),
+ EndOfMessage(),
+ Data(data=b"5"),
+ Data(data=b"6"),
+ Data(data=b"7"),
+ ]
+ ) == [
+ Data(data=b"12"),
+ Response(status_code=200, headers=[]), # type: ignore[arg-type]
+ Data(data=b"34"),
+ EndOfMessage(),
+ Data(data=b"567"),
+ ]
diff --git a/.venv/Lib/site-packages/h11/tests/test_io.py b/.venv/Lib/site-packages/h11/tests/test_io.py
new file mode 100644
index 0000000..2b47c0e
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/tests/test_io.py
@@ -0,0 +1,572 @@
+from typing import Any, Callable, Generator, List
+
+import pytest
+
+from .._events import (
+ ConnectionClosed,
+ Data,
+ EndOfMessage,
+ Event,
+ InformationalResponse,
+ Request,
+ Response,
+)
+from .._headers import Headers, normalize_and_validate
+from .._readers import (
+ _obsolete_line_fold,
+ ChunkedReader,
+ ContentLengthReader,
+ Http10Reader,
+ READERS,
+)
+from .._receivebuffer import ReceiveBuffer
+from .._state import (
+ CLIENT,
+ CLOSED,
+ DONE,
+ IDLE,
+ MIGHT_SWITCH_PROTOCOL,
+ MUST_CLOSE,
+ SEND_BODY,
+ SEND_RESPONSE,
+ SERVER,
+ SWITCHED_PROTOCOL,
+)
+from .._util import LocalProtocolError
+from .._writers import (
+ ChunkedWriter,
+ ContentLengthWriter,
+ Http10Writer,
+ write_any_response,
+ write_headers,
+ write_request,
+ WRITERS,
+)
+from .helpers import normalize_data_events
+
+SIMPLE_CASES = [
+ (
+ (CLIENT, IDLE),
+ Request(
+ method="GET",
+ target="/a",
+ headers=[("Host", "foo"), ("Connection", "close")],
+ ),
+ b"GET /a HTTP/1.1\r\nHost: foo\r\nConnection: close\r\n\r\n",
+ ),
+ (
+ (SERVER, SEND_RESPONSE),
+ Response(status_code=200, headers=[("Connection", "close")], reason=b"OK"),
+ b"HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n",
+ ),
+ (
+ (SERVER, SEND_RESPONSE),
+ Response(status_code=200, headers=[], reason=b"OK"), # type: ignore[arg-type]
+ b"HTTP/1.1 200 OK\r\n\r\n",
+ ),
+ (
+ (SERVER, SEND_RESPONSE),
+ InformationalResponse(
+ status_code=101, headers=[("Upgrade", "websocket")], reason=b"Upgrade"
+ ),
+ b"HTTP/1.1 101 Upgrade\r\nUpgrade: websocket\r\n\r\n",
+ ),
+ (
+ (SERVER, SEND_RESPONSE),
+ InformationalResponse(status_code=101, headers=[], reason=b"Upgrade"), # type: ignore[arg-type]
+ b"HTTP/1.1 101 Upgrade\r\n\r\n",
+ ),
+]
+
+
+def dowrite(writer: Callable[..., None], obj: Any) -> bytes:
+ got_list: List[bytes] = []
+ writer(obj, got_list.append)
+ return b"".join(got_list)
+
+
+def tw(writer: Any, obj: Any, expected: Any) -> None:
+ got = dowrite(writer, obj)
+ assert got == expected
+
+
+def makebuf(data: bytes) -> ReceiveBuffer:
+ buf = ReceiveBuffer()
+ buf += data
+ return buf
+
+
+def tr(reader: Any, data: bytes, expected: Any) -> None:
+ def check(got: Any) -> None:
+ assert got == expected
+ # Headers should always be returned as bytes, not e.g. bytearray
+ # https://github.com/python-hyper/wsproto/pull/54#issuecomment-377709478
+ for name, value in getattr(got, "headers", []):
+ assert type(name) is bytes
+ assert type(value) is bytes
+
+ # Simple: consume whole thing
+ buf = makebuf(data)
+ check(reader(buf))
+ assert not buf
+
+ # Incrementally growing buffer
+ buf = ReceiveBuffer()
+ for i in range(len(data)):
+ assert reader(buf) is None
+ buf += data[i : i + 1]
+ check(reader(buf))
+
+ # Trailing data
+ buf = makebuf(data)
+ buf += b"trailing"
+ check(reader(buf))
+ assert bytes(buf) == b"trailing"
+
+
+def test_writers_simple() -> None:
+ for ((role, state), event, binary) in SIMPLE_CASES:
+ tw(WRITERS[role, state], event, binary)
+
+
+def test_readers_simple() -> None:
+ for ((role, state), event, binary) in SIMPLE_CASES:
+ tr(READERS[role, state], binary, event)
+
+
+def test_writers_unusual() -> None:
+ # Simple test of the write_headers utility routine
+ tw(
+ write_headers,
+ normalize_and_validate([("foo", "bar"), ("baz", "quux")]),
+ b"foo: bar\r\nbaz: quux\r\n\r\n",
+ )
+ tw(write_headers, Headers([]), b"\r\n")
+
+ # We understand HTTP/1.0, but we don't speak it
+ with pytest.raises(LocalProtocolError):
+ tw(
+ write_request,
+ Request(
+ method="GET",
+ target="/",
+ headers=[("Host", "foo"), ("Connection", "close")],
+ http_version="1.0",
+ ),
+ None,
+ )
+ with pytest.raises(LocalProtocolError):
+ tw(
+ write_any_response,
+ Response(
+ status_code=200, headers=[("Connection", "close")], http_version="1.0"
+ ),
+ None,
+ )
+
+
+def test_readers_unusual() -> None:
+ # Reading HTTP/1.0
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.0\r\nSome: header\r\n\r\n",
+ Request(
+ method="HEAD",
+ target="/foo",
+ headers=[("Some", "header")],
+ http_version="1.0",
+ ),
+ )
+
+ # check no-headers, since it's only legal with HTTP/1.0
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.0\r\n\r\n",
+ Request(method="HEAD", target="/foo", headers=[], http_version="1.0"), # type: ignore[arg-type]
+ )
+
+ tr(
+ READERS[SERVER, SEND_RESPONSE],
+ b"HTTP/1.0 200 OK\r\nSome: header\r\n\r\n",
+ Response(
+ status_code=200,
+ headers=[("Some", "header")],
+ http_version="1.0",
+ reason=b"OK",
+ ),
+ )
+
+ # single-character header values (actually disallowed by the ABNF in RFC
+ # 7230 -- this is a bug in the standard that we originally copied...)
+ tr(
+ READERS[SERVER, SEND_RESPONSE],
+ b"HTTP/1.0 200 OK\r\n" b"Foo: a a a a a \r\n\r\n",
+ Response(
+ status_code=200,
+ headers=[("Foo", "a a a a a")],
+ http_version="1.0",
+ reason=b"OK",
+ ),
+ )
+
+    # Empty header values -- also legal
+ tr(
+ READERS[SERVER, SEND_RESPONSE],
+ b"HTTP/1.0 200 OK\r\n" b"Foo:\r\n\r\n",
+ Response(
+ status_code=200, headers=[("Foo", "")], http_version="1.0", reason=b"OK"
+ ),
+ )
+
+ tr(
+ READERS[SERVER, SEND_RESPONSE],
+ b"HTTP/1.0 200 OK\r\n" b"Foo: \t \t \r\n\r\n",
+ Response(
+ status_code=200, headers=[("Foo", "")], http_version="1.0", reason=b"OK"
+ ),
+ )
+
+    # Tolerate broken servers that leave off the reason phrase
+ tr(
+ READERS[SERVER, SEND_RESPONSE],
+ b"HTTP/1.0 200\r\n" b"Foo: bar\r\n\r\n",
+ Response(
+ status_code=200, headers=[("Foo", "bar")], http_version="1.0", reason=b""
+ ),
+ )
+
+    # Tolerate both header line endings (\r\n and \n)
+    # \n\r\n between headers and body
+ tr(
+ READERS[SERVER, SEND_RESPONSE],
+ b"HTTP/1.1 200 OK\r\nSomeHeader: val\n\r\n",
+ Response(
+ status_code=200,
+ headers=[("SomeHeader", "val")],
+ http_version="1.1",
+ reason="OK",
+ ),
+ )
+
+ # delimited only with \n
+ tr(
+ READERS[SERVER, SEND_RESPONSE],
+ b"HTTP/1.1 200 OK\nSomeHeader1: val1\nSomeHeader2: val2\n\n",
+ Response(
+ status_code=200,
+ headers=[("SomeHeader1", "val1"), ("SomeHeader2", "val2")],
+ http_version="1.1",
+ reason="OK",
+ ),
+ )
+
+ # mixed \r\n and \n
+ tr(
+ READERS[SERVER, SEND_RESPONSE],
+ b"HTTP/1.1 200 OK\r\nSomeHeader1: val1\nSomeHeader2: val2\n\r\n",
+ Response(
+ status_code=200,
+ headers=[("SomeHeader1", "val1"), ("SomeHeader2", "val2")],
+ http_version="1.1",
+ reason="OK",
+ ),
+ )
+
+ # obsolete line folding
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.1\r\n"
+ b"Host: example.com\r\n"
+ b"Some: multi-line\r\n"
+ b" header\r\n"
+ b"\tnonsense\r\n"
+ b" \t \t\tI guess\r\n"
+ b"Connection: close\r\n"
+ b"More-nonsense: in the\r\n"
+ b" last header \r\n\r\n",
+ Request(
+ method="HEAD",
+ target="/foo",
+ headers=[
+ ("Host", "example.com"),
+ ("Some", "multi-line header nonsense I guess"),
+ ("Connection", "close"),
+ ("More-nonsense", "in the last header"),
+ ],
+ ),
+ )
+
+ with pytest.raises(LocalProtocolError):
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.1\r\n" b" folded: line\r\n\r\n",
+ None,
+ )
+
+ with pytest.raises(LocalProtocolError):
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.1\r\n" b"foo : line\r\n\r\n",
+ None,
+ )
+ with pytest.raises(LocalProtocolError):
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.1\r\n" b"foo\t: line\r\n\r\n",
+ None,
+ )
+ with pytest.raises(LocalProtocolError):
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.1\r\n" b"foo\t: line\r\n\r\n",
+ None,
+ )
+ with pytest.raises(LocalProtocolError):
+ tr(READERS[CLIENT, IDLE], b"HEAD /foo HTTP/1.1\r\n" b": line\r\n\r\n", None)
+
+
+def test__obsolete_line_fold_bytes() -> None:
+ # _obsolete_line_fold has a defensive cast to bytearray, which is
+ # necessary to protect against O(n^2) behavior in case anyone ever passes
+ # in regular bytestrings... but right now we never pass in regular
+    # bytestrings. So this test just exists to get some coverage on that
+ # defensive cast.
+ assert list(_obsolete_line_fold([b"aaa", b"bbb", b" ccc", b"ddd"])) == [
+ b"aaa",
+ bytearray(b"bbb ccc"),
+ b"ddd",
+ ]
+
+
+def _run_reader_iter(
+ reader: Any, buf: bytes, do_eof: bool
+) -> Generator[Any, None, None]:
+ while True:
+ event = reader(buf)
+ if event is None:
+ break
+ yield event
+ # body readers have undefined behavior after returning EndOfMessage,
+ # because this changes the state so they don't get called again
+ if type(event) is EndOfMessage:
+ break
+ if do_eof:
+ assert not buf
+ yield reader.read_eof()
+
+
+def _run_reader(*args: Any) -> List[Event]:
+ events = list(_run_reader_iter(*args))
+ return normalize_data_events(events)
+
+
+def t_body_reader(thunk: Any, data: bytes, expected: Any, do_eof: bool = False) -> None:
+ # Simple: consume whole thing
+ print("Test 1")
+ buf = makebuf(data)
+ assert _run_reader(thunk(), buf, do_eof) == expected
+
+ # Incrementally growing buffer
+ print("Test 2")
+ reader = thunk()
+ buf = ReceiveBuffer()
+ events = []
+ for i in range(len(data)):
+ events += _run_reader(reader, buf, False)
+ buf += data[i : i + 1]
+ events += _run_reader(reader, buf, do_eof)
+ assert normalize_data_events(events) == expected
+
+ is_complete = any(type(event) is EndOfMessage for event in expected)
+ if is_complete and not do_eof:
+ buf = makebuf(data + b"trailing")
+ assert _run_reader(thunk(), buf, False) == expected
+
+
+def test_ContentLengthReader() -> None:
+ t_body_reader(lambda: ContentLengthReader(0), b"", [EndOfMessage()])
+
+ t_body_reader(
+ lambda: ContentLengthReader(10),
+ b"0123456789",
+ [Data(data=b"0123456789"), EndOfMessage()],
+ )
+
+
+def test_Http10Reader() -> None:
+ t_body_reader(Http10Reader, b"", [EndOfMessage()], do_eof=True)
+ t_body_reader(Http10Reader, b"asdf", [Data(data=b"asdf")], do_eof=False)
+ t_body_reader(
+ Http10Reader, b"asdf", [Data(data=b"asdf"), EndOfMessage()], do_eof=True
+ )
+
+
+def test_ChunkedReader() -> None:
+ t_body_reader(ChunkedReader, b"0\r\n\r\n", [EndOfMessage()])
+
+ t_body_reader(
+ ChunkedReader,
+ b"0\r\nSome: header\r\n\r\n",
+ [EndOfMessage(headers=[("Some", "header")])],
+ )
+
+ t_body_reader(
+ ChunkedReader,
+ b"5\r\n01234\r\n"
+ + b"10\r\n0123456789abcdef\r\n"
+ + b"0\r\n"
+ + b"Some: header\r\n\r\n",
+ [
+ Data(data=b"012340123456789abcdef"),
+ EndOfMessage(headers=[("Some", "header")]),
+ ],
+ )
+
+ t_body_reader(
+ ChunkedReader,
+ b"5\r\n01234\r\n" + b"10\r\n0123456789abcdef\r\n" + b"0\r\n\r\n",
+ [Data(data=b"012340123456789abcdef"), EndOfMessage()],
+ )
+
+ # handles upper and lowercase hex
+ t_body_reader(
+ ChunkedReader,
+ b"aA\r\n" + b"x" * 0xAA + b"\r\n" + b"0\r\n\r\n",
+ [Data(data=b"x" * 0xAA), EndOfMessage()],
+ )
+
+ # refuses arbitrarily long chunk integers
+ with pytest.raises(LocalProtocolError):
+ # Technically this is legal HTTP/1.1, but we refuse to process chunk
+ # sizes that don't fit into 20 characters of hex
+ t_body_reader(ChunkedReader, b"9" * 100 + b"\r\nxxx", [Data(data=b"xxx")])
+
+ # refuses garbage in the chunk count
+ with pytest.raises(LocalProtocolError):
+ t_body_reader(ChunkedReader, b"10\x00\r\nxxx", None)
+
+ # handles (and discards) "chunk extensions" omg wtf
+ t_body_reader(
+ ChunkedReader,
+ b"5; hello=there\r\n"
+ + b"xxxxx"
+ + b"\r\n"
+ + b'0; random="junk"; some=more; canbe=lonnnnngg\r\n\r\n',
+ [Data(data=b"xxxxx"), EndOfMessage()],
+ )
+
+ t_body_reader(
+ ChunkedReader,
+ b"5 \r\n01234\r\n" + b"0\r\n\r\n",
+ [Data(data=b"01234"), EndOfMessage()],
+ )
+
+
+def test_ContentLengthWriter() -> None:
+ w = ContentLengthWriter(5)
+ assert dowrite(w, Data(data=b"123")) == b"123"
+ assert dowrite(w, Data(data=b"45")) == b"45"
+ assert dowrite(w, EndOfMessage()) == b""
+
+ w = ContentLengthWriter(5)
+ with pytest.raises(LocalProtocolError):
+ dowrite(w, Data(data=b"123456"))
+
+ w = ContentLengthWriter(5)
+ dowrite(w, Data(data=b"123"))
+ with pytest.raises(LocalProtocolError):
+ dowrite(w, Data(data=b"456"))
+
+ w = ContentLengthWriter(5)
+ dowrite(w, Data(data=b"123"))
+ with pytest.raises(LocalProtocolError):
+ dowrite(w, EndOfMessage())
+
+ w = ContentLengthWriter(5)
+ dowrite(w, Data(data=b"123")) == b"123"
+ dowrite(w, Data(data=b"45")) == b"45"
+ with pytest.raises(LocalProtocolError):
+ dowrite(w, EndOfMessage(headers=[("Etag", "asdf")]))
+
+
+def test_ChunkedWriter() -> None:
+ w = ChunkedWriter()
+ assert dowrite(w, Data(data=b"aaa")) == b"3\r\naaa\r\n"
+ assert dowrite(w, Data(data=b"a" * 20)) == b"14\r\n" + b"a" * 20 + b"\r\n"
+
+ assert dowrite(w, Data(data=b"")) == b""
+
+ assert dowrite(w, EndOfMessage()) == b"0\r\n\r\n"
+
+ assert (
+ dowrite(w, EndOfMessage(headers=[("Etag", "asdf"), ("a", "b")]))
+ == b"0\r\nEtag: asdf\r\na: b\r\n\r\n"
+ )
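+
+
+# The wire format asserted above, restated as a hedged one-liner: each chunk
+# is the payload length in lowercase hex, CRLF, the payload, CRLF. (h11
+# skips empty Data events because a zero-length chunk would mean end-of-body.)
+def encode_chunk(payload: bytes) -> bytes:
+    return b"%x\r\n%s\r\n" % (len(payload), payload)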
+
+
+def test_Http10Writer() -> None:
+ w = Http10Writer()
+ assert dowrite(w, Data(data=b"1234")) == b"1234"
+ assert dowrite(w, EndOfMessage()) == b""
+
+ with pytest.raises(LocalProtocolError):
+ dowrite(w, EndOfMessage(headers=[("Etag", "asdf")]))
+
+
+def test_reject_garbage_after_response_line() -> None:
+ with pytest.raises(LocalProtocolError):
+ tr(READERS[SERVER, SEND_RESPONSE], b"HTTP/1.0 200 OK\x00xxxx\r\n\r\n", None)
+
+
+def test_reject_garbage_after_request_line() -> None:
+ with pytest.raises(LocalProtocolError):
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.1 xxxxxx\r\n" b"Host: a\r\n\r\n",
+ None,
+ )
+
+
+def test_reject_garbage_in_header_line() -> None:
+ with pytest.raises(LocalProtocolError):
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.1\r\n" b"Host: foo\x00bar\r\n\r\n",
+ None,
+ )
+
+
+def test_reject_non_vchar_in_path() -> None:
+ for bad_char in b"\x00\x20\x7f\xee":
+ message = bytearray(b"HEAD /")
+ message.append(bad_char)
+ message.extend(b" HTTP/1.1\r\nHost: foobar\r\n\r\n")
+ with pytest.raises(LocalProtocolError):
+ tr(READERS[CLIENT, IDLE], message, None)
+
+
+# https://github.com/python-hyper/h11/issues/57
+def test_allow_some_garbage_in_cookies() -> None:
+ tr(
+ READERS[CLIENT, IDLE],
+ b"HEAD /foo HTTP/1.1\r\n"
+ b"Host: foo\r\n"
+ b"Set-Cookie: ___utmvafIumyLc=kUd\x01UpAt; path=/; Max-Age=900\r\n"
+ b"\r\n",
+ Request(
+ method="HEAD",
+ target="/foo",
+ headers=[
+ ("Host", "foo"),
+ ("Set-Cookie", "___utmvafIumyLc=kUd\x01UpAt; path=/; Max-Age=900"),
+ ],
+ ),
+ )
+
+
+def test_host_comes_first() -> None:
+ tw(
+ write_headers,
+ normalize_and_validate([("foo", "bar"), ("Host", "example.com")]),
+ b"Host: example.com\r\nfoo: bar\r\n\r\n",
+ )
diff --git a/.venv/Lib/site-packages/h11/tests/test_receivebuffer.py b/.venv/Lib/site-packages/h11/tests/test_receivebuffer.py
new file mode 100644
index 0000000..21a3870
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/tests/test_receivebuffer.py
@@ -0,0 +1,135 @@
+import re
+from typing import Tuple
+
+import pytest
+
+from .._receivebuffer import ReceiveBuffer
+
+
+def test_receivebuffer() -> None:
+ b = ReceiveBuffer()
+ assert not b
+ assert len(b) == 0
+ assert bytes(b) == b""
+
+ b += b"123"
+ assert b
+ assert len(b) == 3
+ assert bytes(b) == b"123"
+
+ assert bytes(b) == b"123"
+
+ assert b.maybe_extract_at_most(2) == b"12"
+ assert b
+ assert len(b) == 1
+ assert bytes(b) == b"3"
+
+ assert bytes(b) == b"3"
+
+ assert b.maybe_extract_at_most(10) == b"3"
+ assert bytes(b) == b""
+
+ assert b.maybe_extract_at_most(10) is None
+ assert not b
+
+ ################################################################
+    # maybe_extract_next_line
+ ################################################################
+
+ b += b"123\n456\r\n789\r\n"
+
+ assert b.maybe_extract_next_line() == b"123\n456\r\n"
+ assert bytes(b) == b"789\r\n"
+
+ assert b.maybe_extract_next_line() == b"789\r\n"
+ assert bytes(b) == b""
+
+ b += b"12\r"
+ assert b.maybe_extract_next_line() is None
+ assert bytes(b) == b"12\r"
+
+ b += b"345\n\r"
+ assert b.maybe_extract_next_line() is None
+ assert bytes(b) == b"12\r345\n\r"
+
+ # here we stopped at the middle of b"\r\n" delimiter
+
+ b += b"\n6789aaa123\r\n"
+ assert b.maybe_extract_next_line() == b"12\r345\n\r\n"
+ assert b.maybe_extract_next_line() == b"6789aaa123\r\n"
+ assert b.maybe_extract_next_line() is None
+ assert bytes(b) == b""
+
+ ################################################################
+ # maybe_extract_lines
+ ################################################################
+
+ b += b"123\r\na: b\r\nfoo:bar\r\n\r\ntrailing"
+ lines = b.maybe_extract_lines()
+ assert lines == [b"123", b"a: b", b"foo:bar"]
+ assert bytes(b) == b"trailing"
+
+ assert b.maybe_extract_lines() is None
+
+ b += b"\r\n\r"
+ assert b.maybe_extract_lines() is None
+
+ assert b.maybe_extract_at_most(100) == b"trailing\r\n\r"
+ assert not b
+
+ # Empty body case (as happens at the end of chunked encoding if there are
+ # no trailing headers, e.g.)
+ b += b"\r\ntrailing"
+ assert b.maybe_extract_lines() == []
+ assert bytes(b) == b"trailing"
+
+
+@pytest.mark.parametrize(
+ "data",
+ [
+ pytest.param(
+ (
+ b"HTTP/1.1 200 OK\r\n",
+ b"Content-type: text/plain\r\n",
+ b"Connection: close\r\n",
+ b"\r\n",
+ b"Some body",
+ ),
+ id="with_crlf_delimiter",
+ ),
+ pytest.param(
+ (
+ b"HTTP/1.1 200 OK\n",
+ b"Content-type: text/plain\n",
+ b"Connection: close\n",
+ b"\n",
+ b"Some body",
+ ),
+ id="with_lf_only_delimiter",
+ ),
+ pytest.param(
+ (
+ b"HTTP/1.1 200 OK\n",
+ b"Content-type: text/plain\r\n",
+ b"Connection: close\n",
+ b"\n",
+ b"Some body",
+ ),
+ id="with_mixed_crlf_and_lf",
+ ),
+ ],
+)
+def test_receivebuffer_for_invalid_delimiter(data: Tuple[bytes, ...]) -> None:
+ b = ReceiveBuffer()
+
+ for line in data:
+ b += line
+
+ lines = b.maybe_extract_lines()
+
+ assert lines == [
+ b"HTTP/1.1 200 OK",
+ b"Content-type: text/plain",
+ b"Connection: close",
+ ]
+ assert bytes(b) == b"Some body"
diff --git a/.venv/Lib/site-packages/h11/tests/test_state.py b/.venv/Lib/site-packages/h11/tests/test_state.py
new file mode 100644
index 0000000..bc974e6
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/tests/test_state.py
@@ -0,0 +1,271 @@
+import pytest
+
+from .._events import (
+ ConnectionClosed,
+ Data,
+ EndOfMessage,
+ Event,
+ InformationalResponse,
+ Request,
+ Response,
+)
+from .._state import (
+ _SWITCH_CONNECT,
+ _SWITCH_UPGRADE,
+ CLIENT,
+ CLOSED,
+ ConnectionState,
+ DONE,
+ IDLE,
+ MIGHT_SWITCH_PROTOCOL,
+ MUST_CLOSE,
+ SEND_BODY,
+ SEND_RESPONSE,
+ SERVER,
+ SWITCHED_PROTOCOL,
+)
+from .._util import LocalProtocolError
+
+
+def test_ConnectionState() -> None:
+ cs = ConnectionState()
+
+ # Basic event-triggered transitions
+
+ assert cs.states == {CLIENT: IDLE, SERVER: IDLE}
+
+ cs.process_event(CLIENT, Request)
+ # The SERVER-Request special case:
+ assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
+
+ # Illegal transitions raise an error and nothing happens
+ with pytest.raises(LocalProtocolError):
+ cs.process_event(CLIENT, Request)
+ assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
+
+ cs.process_event(SERVER, InformationalResponse)
+ assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
+
+ cs.process_event(SERVER, Response)
+ assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_BODY}
+
+ cs.process_event(CLIENT, EndOfMessage)
+ cs.process_event(SERVER, EndOfMessage)
+ assert cs.states == {CLIENT: DONE, SERVER: DONE}
+
+ # State-triggered transition
+
+ cs.process_event(SERVER, ConnectionClosed)
+ assert cs.states == {CLIENT: MUST_CLOSE, SERVER: CLOSED}
+
+
+def test_ConnectionState_keep_alive() -> None:
+ # keep_alive = False
+ cs = ConnectionState()
+ cs.process_event(CLIENT, Request)
+ cs.process_keep_alive_disabled()
+ cs.process_event(CLIENT, EndOfMessage)
+ assert cs.states == {CLIENT: MUST_CLOSE, SERVER: SEND_RESPONSE}
+
+ cs.process_event(SERVER, Response)
+ cs.process_event(SERVER, EndOfMessage)
+ assert cs.states == {CLIENT: MUST_CLOSE, SERVER: MUST_CLOSE}
+
+
+def test_ConnectionState_keep_alive_in_DONE() -> None:
+ # Check that if keep_alive is disabled when the CLIENT is already in DONE,
+ # then this is sufficient to immediately trigger the DONE -> MUST_CLOSE
+ # transition
+ cs = ConnectionState()
+ cs.process_event(CLIENT, Request)
+ cs.process_event(CLIENT, EndOfMessage)
+ assert cs.states[CLIENT] is DONE
+ cs.process_keep_alive_disabled()
+ assert cs.states[CLIENT] is MUST_CLOSE
+
+
+def test_ConnectionState_switch_denied() -> None:
+ for switch_type in (_SWITCH_CONNECT, _SWITCH_UPGRADE):
+ for deny_early in (True, False):
+ cs = ConnectionState()
+ cs.process_client_switch_proposal(switch_type)
+ cs.process_event(CLIENT, Request)
+ cs.process_event(CLIENT, Data)
+ assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
+
+ assert switch_type in cs.pending_switch_proposals
+
+ if deny_early:
+ # before client reaches DONE
+ cs.process_event(SERVER, Response)
+ assert not cs.pending_switch_proposals
+
+ cs.process_event(CLIENT, EndOfMessage)
+
+ if deny_early:
+ assert cs.states == {CLIENT: DONE, SERVER: SEND_BODY}
+ else:
+ assert cs.states == {
+ CLIENT: MIGHT_SWITCH_PROTOCOL,
+ SERVER: SEND_RESPONSE,
+ }
+
+ cs.process_event(SERVER, InformationalResponse)
+ assert cs.states == {
+ CLIENT: MIGHT_SWITCH_PROTOCOL,
+ SERVER: SEND_RESPONSE,
+ }
+
+ cs.process_event(SERVER, Response)
+ assert cs.states == {CLIENT: DONE, SERVER: SEND_BODY}
+ assert not cs.pending_switch_proposals
+
+
+_response_type_for_switch = {
+ _SWITCH_UPGRADE: InformationalResponse,
+ _SWITCH_CONNECT: Response,
+ None: Response,
+}
+
+
+def test_ConnectionState_protocol_switch_accepted() -> None:
+ for switch_event in [_SWITCH_UPGRADE, _SWITCH_CONNECT]:
+ cs = ConnectionState()
+ cs.process_client_switch_proposal(switch_event)
+ cs.process_event(CLIENT, Request)
+ cs.process_event(CLIENT, Data)
+ assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
+
+ cs.process_event(CLIENT, EndOfMessage)
+ assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE}
+
+ cs.process_event(SERVER, InformationalResponse)
+ assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE}
+
+ cs.process_event(SERVER, _response_type_for_switch[switch_event], switch_event)
+ assert cs.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL}
+
+
+def test_ConnectionState_double_protocol_switch() -> None:
+ # CONNECT + Upgrade is legal! Very silly, but legal. So we support
+ # it. Because sometimes doing the silly thing is easier than not.
+ for server_switch in [None, _SWITCH_UPGRADE, _SWITCH_CONNECT]:
+ cs = ConnectionState()
+ cs.process_client_switch_proposal(_SWITCH_UPGRADE)
+ cs.process_client_switch_proposal(_SWITCH_CONNECT)
+ cs.process_event(CLIENT, Request)
+ cs.process_event(CLIENT, EndOfMessage)
+ assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE}
+ cs.process_event(
+ SERVER, _response_type_for_switch[server_switch], server_switch
+ )
+ if server_switch is None:
+ assert cs.states == {CLIENT: DONE, SERVER: SEND_BODY}
+ else:
+ assert cs.states == {CLIENT: SWITCHED_PROTOCOL, SERVER: SWITCHED_PROTOCOL}
+
+
+def test_ConnectionState_inconsistent_protocol_switch() -> None:
+ for client_switches, server_switch in [
+ ([], _SWITCH_CONNECT),
+ ([], _SWITCH_UPGRADE),
+ ([_SWITCH_UPGRADE], _SWITCH_CONNECT),
+ ([_SWITCH_CONNECT], _SWITCH_UPGRADE),
+ ]:
+ cs = ConnectionState()
+ for client_switch in client_switches: # type: ignore[attr-defined]
+ cs.process_client_switch_proposal(client_switch)
+ cs.process_event(CLIENT, Request)
+ with pytest.raises(LocalProtocolError):
+ cs.process_event(SERVER, Response, server_switch)
+
+
+def test_ConnectionState_keepalive_protocol_switch_interaction() -> None:
+ # keep_alive=False + pending_switch_proposals
+ cs = ConnectionState()
+ cs.process_client_switch_proposal(_SWITCH_UPGRADE)
+ cs.process_event(CLIENT, Request)
+ cs.process_keep_alive_disabled()
+ cs.process_event(CLIENT, Data)
+ assert cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE}
+
+ # the protocol switch "wins"
+ cs.process_event(CLIENT, EndOfMessage)
+ assert cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE}
+
+ # but when the server denies the request, keep_alive comes back into play
+ cs.process_event(SERVER, Response)
+ assert cs.states == {CLIENT: MUST_CLOSE, SERVER: SEND_BODY}
+
+
+def test_ConnectionState_reuse() -> None:
+ cs = ConnectionState()
+
+ with pytest.raises(LocalProtocolError):
+ cs.start_next_cycle()
+
+ cs.process_event(CLIENT, Request)
+ cs.process_event(CLIENT, EndOfMessage)
+
+ with pytest.raises(LocalProtocolError):
+ cs.start_next_cycle()
+
+ cs.process_event(SERVER, Response)
+ cs.process_event(SERVER, EndOfMessage)
+
+ cs.start_next_cycle()
+ assert cs.states == {CLIENT: IDLE, SERVER: IDLE}
+
+ # No keepalive
+
+ cs.process_event(CLIENT, Request)
+ cs.process_keep_alive_disabled()
+ cs.process_event(CLIENT, EndOfMessage)
+ cs.process_event(SERVER, Response)
+ cs.process_event(SERVER, EndOfMessage)
+
+ with pytest.raises(LocalProtocolError):
+ cs.start_next_cycle()
+
+ # One side closed
+
+ cs = ConnectionState()
+ cs.process_event(CLIENT, Request)
+ cs.process_event(CLIENT, EndOfMessage)
+ cs.process_event(CLIENT, ConnectionClosed)
+ cs.process_event(SERVER, Response)
+ cs.process_event(SERVER, EndOfMessage)
+
+ with pytest.raises(LocalProtocolError):
+ cs.start_next_cycle()
+
+    # Successful protocol switch
+
+ cs = ConnectionState()
+ cs.process_client_switch_proposal(_SWITCH_UPGRADE)
+ cs.process_event(CLIENT, Request)
+ cs.process_event(CLIENT, EndOfMessage)
+ cs.process_event(SERVER, InformationalResponse, _SWITCH_UPGRADE)
+
+ with pytest.raises(LocalProtocolError):
+ cs.start_next_cycle()
+
+ # Failed protocol switch
+
+ cs = ConnectionState()
+ cs.process_client_switch_proposal(_SWITCH_UPGRADE)
+ cs.process_event(CLIENT, Request)
+ cs.process_event(CLIENT, EndOfMessage)
+ cs.process_event(SERVER, Response)
+ cs.process_event(SERVER, EndOfMessage)
+
+ cs.start_next_cycle()
+ assert cs.states == {CLIENT: IDLE, SERVER: IDLE}
+
+
+def test_server_request_is_illegal() -> None:
+ # There used to be a bug in how we handled the Request special case that
+ # made this allowed...
+ cs = ConnectionState()
+ with pytest.raises(LocalProtocolError):
+ cs.process_event(SERVER, Request)
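
The tests above drive h11's connection state machine through a full request/response cycle. A minimal standalone sketch of the same happy path, assuming the private h11._state and h11._events modules expose the names the tests use:

    from h11._events import EndOfMessage, Request, Response
    from h11._state import CLIENT, IDLE, SERVER, ConnectionState

    cs = ConnectionState()
    cs.process_event(CLIENT, Request)       # client starts a request
    cs.process_event(CLIENT, EndOfMessage)  # client body done -> CLIENT: DONE
    cs.process_event(SERVER, Response)      # server answers -> SERVER: SEND_BODY
    cs.process_event(SERVER, EndOfMessage)  # server body done -> SERVER: DONE
    cs.start_next_cycle()                   # keep-alive: both sides reset
    assert cs.states == {CLIENT: IDLE, SERVER: IDLE}
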
diff --git a/.venv/Lib/site-packages/h11/tests/test_util.py b/.venv/Lib/site-packages/h11/tests/test_util.py
new file mode 100644
index 0000000..79bc095
--- /dev/null
+++ b/.venv/Lib/site-packages/h11/tests/test_util.py
@@ -0,0 +1,112 @@
+import re
+import sys
+import traceback
+from typing import NoReturn
+
+import pytest
+
+from .._util import (
+ bytesify,
+ LocalProtocolError,
+ ProtocolError,
+ RemoteProtocolError,
+ Sentinel,
+ validate,
+)
+
+
+def test_ProtocolError() -> None:
+ with pytest.raises(TypeError):
+ ProtocolError("abstract base class")
+
+
+def test_LocalProtocolError() -> None:
+ try:
+ raise LocalProtocolError("foo")
+ except LocalProtocolError as e:
+ assert str(e) == "foo"
+ assert e.error_status_hint == 400
+
+ try:
+ raise LocalProtocolError("foo", error_status_hint=418)
+ except LocalProtocolError as e:
+ assert str(e) == "foo"
+ assert e.error_status_hint == 418
+
+ def thunk() -> NoReturn:
+ raise LocalProtocolError("a", error_status_hint=420)
+
+ try:
+ try:
+ thunk()
+ except LocalProtocolError as exc1:
+ orig_traceback = "".join(traceback.format_tb(sys.exc_info()[2]))
+ exc1._reraise_as_remote_protocol_error()
+ except RemoteProtocolError as exc2:
+ assert type(exc2) is RemoteProtocolError
+ assert exc2.args == ("a",)
+ assert exc2.error_status_hint == 420
+ new_traceback = "".join(traceback.format_tb(sys.exc_info()[2]))
+ assert new_traceback.endswith(orig_traceback)
+
+
+def test_validate() -> None:
+    my_re = re.compile(rb"(?P<group1>[0-9]+)\.(?P<group2>[0-9]+)")
+ with pytest.raises(LocalProtocolError):
+ validate(my_re, b"0.")
+
+ groups = validate(my_re, b"0.1")
+ assert groups == {"group1": b"0", "group2": b"1"}
+
+ # successful partial matches are an error - must match whole string
+ with pytest.raises(LocalProtocolError):
+ validate(my_re, b"0.1xx")
+ with pytest.raises(LocalProtocolError):
+ validate(my_re, b"0.1\n")
+
+
+def test_validate_formatting() -> None:
+ my_re = re.compile(rb"foo")
+
+ with pytest.raises(LocalProtocolError) as excinfo:
+ validate(my_re, b"", "oops")
+ assert "oops" in str(excinfo.value)
+
+ with pytest.raises(LocalProtocolError) as excinfo:
+ validate(my_re, b"", "oops {}")
+ assert "oops {}" in str(excinfo.value)
+
+ with pytest.raises(LocalProtocolError) as excinfo:
+ validate(my_re, b"", "oops {} xx", 10)
+ assert "oops 10 xx" in str(excinfo.value)
+
+
+def test_make_sentinel() -> None:
+ class S(Sentinel, metaclass=Sentinel):
+ pass
+
+ assert repr(S) == "S"
+ assert S == S
+ assert type(S).__name__ == "S"
+ assert S in {S}
+ assert type(S) is S
+
+ class S2(Sentinel, metaclass=Sentinel):
+ pass
+
+ assert repr(S2) == "S2"
+ assert S != S2
+ assert S not in {S2}
+ assert type(S) is not type(S2)
+
+
+def test_bytesify() -> None:
+ assert bytesify(b"123") == b"123"
+ assert bytesify(bytearray(b"123")) == b"123"
+ assert bytesify("123") == b"123"
+
+ with pytest.raises(UnicodeEncodeError):
+ bytesify("\u1234")
+
+ with pytest.raises(TypeError):
+ bytesify(10)
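
validate() is h11's private parsing helper: it requires the regex to match the whole byte string and returns the named groups as a dict, raising LocalProtocolError (with an optional formatted message) otherwise. A short sketch under that assumption, mirroring the tests above:

    import re

    from h11._util import LocalProtocolError, validate

    version_re = re.compile(rb"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)")
    assert validate(version_re, b"1.1") == {"major": b"1", "minor": b"1"}
    try:
        validate(version_re, b"1.1 ", "bad version: {!r}", b"1.1 ")
    except LocalProtocolError as exc:
        print(exc)  # the trailing space makes the full-string match fail
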
diff --git a/.venv/Lib/site-packages/python_engineio-4.9.1.dist-info/INSTALLER b/.venv/Lib/site-packages/python_engineio-4.9.1.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/.venv/Lib/site-packages/python_engineio-4.9.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/.venv/Lib/site-packages/python_engineio-4.9.1.dist-info/LICENSE b/.venv/Lib/site-packages/python_engineio-4.9.1.dist-info/LICENSE
new file mode 100644
index 0000000..c22109a
--- /dev/null
+++ b/.venv/Lib/site-packages/python_engineio-4.9.1.dist-info/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Miguel Grinberg
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/.venv/Lib/site-packages/python_engineio-4.9.1.dist-info/METADATA b/.venv/Lib/site-packages/python_engineio-4.9.1.dist-info/METADATA
new file mode 100644
index 0000000..87858f4
--- /dev/null
+++ b/.venv/Lib/site-packages/python_engineio-4.9.1.dist-info/METADATA
@@ -0,0 +1,48 @@
+Metadata-Version: 2.1
+Name: python-engineio
+Version: 4.9.1
+Summary: Engine.IO server and client for Python
+Author-email: Miguel Grinberg
+Project-URL: Homepage, https://github.com/miguelgrinberg/python-engineio
+Project-URL: Bug Tracker, https://github.com/miguelgrinberg/python-engineio/issues
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.6
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: simple-websocket >=0.10.0
+Provides-Extra: asyncio_client
+Requires-Dist: aiohttp >=3.4 ; extra == 'asyncio_client'
+Provides-Extra: client
+Requires-Dist: requests >=2.21.0 ; extra == 'client'
+Requires-Dist: websocket-client >=0.54.0 ; extra == 'client'
+Provides-Extra: docs
+Requires-Dist: sphinx ; extra == 'docs'
+
+python-engineio
+===============
+
+[Build status](https://github.com/miguelgrinberg/python-engineio/actions) [codecov](https://codecov.io/gh/miguelgrinberg/python-engineio)
+
+Python implementation of the `Engine.IO` realtime client and server.
+
+Sponsors
+--------
+
+The following organizations are funding this project:
+
+ [Socket.IO](https://socket.io) | [Add your company here!](https://github.com/sponsors/miguelgrinberg)|
+-|-
+
+Many individual sponsors also support this project through small ongoing contributions. Why not [join them](https://github.com/sponsors/miguelgrinberg)?
+
+Resources
+---------
+
+- [Documentation](https://python-engineio.readthedocs.io/)
+- [PyPI](https://pypi.python.org/pypi/python-engineio)
+- [Change Log](https://github.com/miguelgrinberg/python-engineio/blob/main/CHANGES.md)
+- Questions? See the [questions](https://stackoverflow.com/questions/tagged/python-socketio) others have asked on Stack Overflow, or [ask](https://stackoverflow.com/questions/ask?tags=python+python-socketio) your own question.
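
The metadata above defines optional extras; the client extra (pip install "python-engineio[client]") pulls in requests and websocket-client for the synchronous client. A minimal client sketch (URL and payload are illustrative):

    import engineio

    eio = engineio.Client()

    @eio.on('message')
    def on_message(data):
        print('received:', data)

    eio.connect('http://localhost:5000')  # hypothetical local server
    eio.send('hello')
    eio.wait()  # block until the connection ends
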
diff --git a/.venv/Lib/site-packages/python_engineio-4.9.1.dist-info/RECORD b/.venv/Lib/site-packages/python_engineio-4.9.1.dist-info/RECORD
new file mode 100644
index 0000000..408c6b6
--- /dev/null
+++ b/.venv/Lib/site-packages/python_engineio-4.9.1.dist-info/RECORD
@@ -0,0 +1,58 @@
+engineio/__init__.py,sha256=0R2PY1EXu3sicP7mkA0_QxEVGRlFlgvsxfhByqREE1A,481
+engineio/__pycache__/__init__.cpython-312.pyc,,
+engineio/__pycache__/async_client.cpython-312.pyc,,
+engineio/__pycache__/async_server.cpython-312.pyc,,
+engineio/__pycache__/async_socket.cpython-312.pyc,,
+engineio/__pycache__/base_client.cpython-312.pyc,,
+engineio/__pycache__/base_server.cpython-312.pyc,,
+engineio/__pycache__/base_socket.cpython-312.pyc,,
+engineio/__pycache__/client.cpython-312.pyc,,
+engineio/__pycache__/exceptions.cpython-312.pyc,,
+engineio/__pycache__/json.cpython-312.pyc,,
+engineio/__pycache__/middleware.cpython-312.pyc,,
+engineio/__pycache__/packet.cpython-312.pyc,,
+engineio/__pycache__/payload.cpython-312.pyc,,
+engineio/__pycache__/server.cpython-312.pyc,,
+engineio/__pycache__/socket.cpython-312.pyc,,
+engineio/__pycache__/static_files.cpython-312.pyc,,
+engineio/async_client.py,sha256=QyHBWpLZxfBc4lK_eodkPM0xzsfVYpt7mzNaA9uG3cg,27932
+engineio/async_drivers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+engineio/async_drivers/__pycache__/__init__.cpython-312.pyc,,
+engineio/async_drivers/__pycache__/_websocket_wsgi.cpython-312.pyc,,
+engineio/async_drivers/__pycache__/aiohttp.cpython-312.pyc,,
+engineio/async_drivers/__pycache__/asgi.cpython-312.pyc,,
+engineio/async_drivers/__pycache__/eventlet.cpython-312.pyc,,
+engineio/async_drivers/__pycache__/gevent.cpython-312.pyc,,
+engineio/async_drivers/__pycache__/gevent_uwsgi.cpython-312.pyc,,
+engineio/async_drivers/__pycache__/sanic.cpython-312.pyc,,
+engineio/async_drivers/__pycache__/threading.cpython-312.pyc,,
+engineio/async_drivers/__pycache__/tornado.cpython-312.pyc,,
+engineio/async_drivers/_websocket_wsgi.py,sha256=FGoRBGOUsEtlfklYNylR0o3oLOGmXIR8QzaCsGLgr3I,949
+engineio/async_drivers/aiohttp.py,sha256=zJdujjO6dSL_oKDDr4xfO4ID_Vd8faEp1AOIW4ogKME,3768
+engineio/async_drivers/asgi.py,sha256=CZ_XOiI83vXm_p6_6VqaxocwvgVAUhphtAsZaIyI3_E,10837
+engineio/async_drivers/eventlet.py,sha256=IG6oLaWH663dw5CEnO-SRz6IsJb6lO55kRCYTdmqZE0,1755
+engineio/async_drivers/gevent.py,sha256=hnJHeWdDQE2jfoLCP5DnwVPzsQlcTLJUMA5EVf1UL-k,2962
+engineio/async_drivers/gevent_uwsgi.py,sha256=cnjCsnDHTa6rKgwDKD6rLvIw1Yun-g4c1QbujOC_bMY,5962
+engineio/async_drivers/sanic.py,sha256=SY0HIp5DUHF7B55tJCBB_8qDjrRTD_FyNQ2cIwRIGR8,4538
+engineio/async_drivers/threading.py,sha256=ywmG59d4H6OHZjKarBN97-9BHEsRxFEz9YN-E9QAu_I,463
+engineio/async_drivers/tornado.py,sha256=9bB7FvY47Snx_h4rsNwRk5wIINf2ju7hXWTAqF3intA,5909
+engineio/async_server.py,sha256=dOhDfyPY817RMzEFq9U-IH8e0b7NCEFKTi-c8zb-UCw,25098
+engineio/async_socket.py,sha256=P8OZW1N5y7jr2hW1qLLK2y_Clt7aHdv2ucYW69mbU4Q,10305
+engineio/base_client.py,sha256=Q_w0Stvy89wPHwWk7411dRxpGjxNPypm4xy_4XMPZ9M,4872
+engineio/base_server.py,sha256=Em2RRpbohulKjmYTWHnf_lALKvTT_jHr442Rr2g_BEQ,14013
+engineio/base_socket.py,sha256=Bw6TWv1pnlXcDdqT59C5CyTaBi7l-_mkykIUuZLjz-g,400
+engineio/client.py,sha256=79mpLSZT1m-rSu0EcfvOR6TtJbJ6ncCsO0nF7l7WW9A,26317
+engineio/exceptions.py,sha256=FyuMb5qhX9CUYP3fEoe1m-faU96ApdQTSbblaaoo8LA,292
+engineio/json.py,sha256=SG5FTojqd1ix6u0dKXJsZVqqdYioZLO4S2GPL7BKl3U,405
+engineio/middleware.py,sha256=BF_qHAIZZnIbfiP256SD1CX3kzNWbSuto1cpih8oIFg,3766
+engineio/packet.py,sha256=ETMeLgdpZghXK9fth93IZO8pIft6Sg3d1QGpyTx4xBE,3189
+engineio/payload.py,sha256=2iLIFgIweTWkLok_UZ5zCgELmRSGyUUI5eeYcEerFSs,1547
+engineio/server.py,sha256=UegXydKobZaR1O0MxS_MENKNF7PBjpP_kPI0-OpI5EQ,21558
+engineio/socket.py,sha256=dasec3jXoV2eR1jmIrhEIfHzyUuuv8FwvxLB3DpyjeY,9996
+engineio/static_files.py,sha256=pwez9LQFaSQXMbtI0vLyD6UDiokQ4rNfmRYgVLKOthc,2064
+python_engineio-4.9.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+python_engineio-4.9.1.dist-info/LICENSE,sha256=yel9Pbwfu82094CLKCzWRtuIev9PUxP-a76NTDFAWpw,1082
+python_engineio-4.9.1.dist-info/METADATA,sha256=MNHZzYM4BsSNebhsXVslP-kaHg08S74pkLABnPxDRYw,2244
+python_engineio-4.9.1.dist-info/RECORD,,
+python_engineio-4.9.1.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+python_engineio-4.9.1.dist-info/top_level.txt,sha256=u8PmNisCZLwRYcWrNLe9wutQ2tt4zNi8IH362c-HWuA,9
diff --git a/.venv/Lib/site-packages/python_engineio-4.9.1.dist-info/WHEEL b/.venv/Lib/site-packages/python_engineio-4.9.1.dist-info/WHEEL
new file mode 100644
index 0000000..bab98d6
--- /dev/null
+++ b/.venv/Lib/site-packages/python_engineio-4.9.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.43.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/.venv/Lib/site-packages/python_engineio-4.9.1.dist-info/top_level.txt b/.venv/Lib/site-packages/python_engineio-4.9.1.dist-info/top_level.txt
new file mode 100644
index 0000000..8f23d7e
--- /dev/null
+++ b/.venv/Lib/site-packages/python_engineio-4.9.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+engineio
diff --git a/.venv/Lib/site-packages/python_socketio-5.11.3.dist-info/INSTALLER b/.venv/Lib/site-packages/python_socketio-5.11.3.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/.venv/Lib/site-packages/python_socketio-5.11.3.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/.venv/Lib/site-packages/python_socketio-5.11.3.dist-info/LICENSE b/.venv/Lib/site-packages/python_socketio-5.11.3.dist-info/LICENSE
new file mode 100644
index 0000000..c22109a
--- /dev/null
+++ b/.venv/Lib/site-packages/python_socketio-5.11.3.dist-info/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Miguel Grinberg
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/.venv/Lib/site-packages/python_socketio-5.11.3.dist-info/METADATA b/.venv/Lib/site-packages/python_socketio-5.11.3.dist-info/METADATA
new file mode 100644
index 0000000..cfc44c2
--- /dev/null
+++ b/.venv/Lib/site-packages/python_socketio-5.11.3.dist-info/METADATA
@@ -0,0 +1,71 @@
+Metadata-Version: 2.1
+Name: python-socketio
+Version: 5.11.3
+Summary: Socket.IO server and client for Python
+Author-email: Miguel Grinberg
+Project-URL: Homepage, https://github.com/miguelgrinberg/python-socketio
+Project-URL: Bug Tracker, https://github.com/miguelgrinberg/python-socketio/issues
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: bidict >=0.21.0
+Requires-Dist: python-engineio >=4.8.0
+Provides-Extra: asyncio_client
+Requires-Dist: aiohttp >=3.4 ; extra == 'asyncio_client'
+Provides-Extra: client
+Requires-Dist: requests >=2.21.0 ; extra == 'client'
+Requires-Dist: websocket-client >=0.54.0 ; extra == 'client'
+Provides-Extra: docs
+Requires-Dist: sphinx ; extra == 'docs'
+
+python-socketio
+===============
+
+[Build status](https://github.com/miguelgrinberg/python-socketio/actions) [codecov](https://codecov.io/gh/miguelgrinberg/python-socketio)
+
+Python implementation of the `Socket.IO` realtime client and server.
+
+Sponsors
+--------
+
+The following organizations are funding this project:
+
+ [Socket.IO](https://socket.io) | [Add your company here!](https://github.com/sponsors/miguelgrinberg)|
+-|-
+
+Many individual sponsors also support this project through small ongoing contributions. Why not [join them](https://github.com/sponsors/miguelgrinberg)?
+
+Version compatibility
+---------------------
+
+The Socket.IO protocol has been through a number of revisions, and some of these
+introduced backward incompatible changes, which means that the client and the
+server must use compatible versions for everything to work.
+
+If you are using the Python client and server, the easiest way to ensure compatibility
+is to use the same version of this package for the client and the server. If you are
+using this package with a different client or server, then you must ensure the
+versions are compatible.
+
+The version compatibility chart below maps versions of this package to versions
+of the JavaScript reference implementation and the versions of the Socket.IO and
+Engine.IO protocols.
+
+JavaScript Socket.IO version | Socket.IO protocol revision | Engine.IO protocol revision | python-socketio version
+-|-|-|-
+0.9.x | 1, 2 | 1, 2 | Not supported
+1.x and 2.x | 3, 4 | 3 | 4.x
+3.x and 4.x | 5 | 4 | 5.x
+
+Resources
+---------
+
+- [Documentation](http://python-socketio.readthedocs.io/)
+- [PyPI](https://pypi.python.org/pypi/python-socketio)
+- [Change Log](https://github.com/miguelgrinberg/python-socketio/blob/main/CHANGES.md)
+- Questions? See the [questions](https://stackoverflow.com/questions/tagged/python-socketio) others have asked on Stack Overflow, or [ask](https://stackoverflow.com/questions/ask?tags=python+python-socketio) your own question.
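
A minimal sketch of the client side of this package against a hypothetical local server (the client extra supplies the HTTP and WebSocket transports):

    import socketio

    sio = socketio.Client()

    @sio.event
    def connect():
        print('connected, sid =', sio.sid)

    @sio.on('my_event')  # event name is illustrative
    def on_my_event(data):
        print('my_event:', data)

    sio.connect('http://localhost:5000')
    sio.emit('hello', {'from': 'python'})
    sio.wait()
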
diff --git a/.venv/Lib/site-packages/python_socketio-5.11.3.dist-info/RECORD b/.venv/Lib/site-packages/python_socketio-5.11.3.dist-info/RECORD
new file mode 100644
index 0000000..0358029
--- /dev/null
+++ b/.venv/Lib/site-packages/python_socketio-5.11.3.dist-info/RECORD
@@ -0,0 +1,68 @@
+python_socketio-5.11.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+python_socketio-5.11.3.dist-info/LICENSE,sha256=yel9Pbwfu82094CLKCzWRtuIev9PUxP-a76NTDFAWpw,1082
+python_socketio-5.11.3.dist-info/METADATA,sha256=fNgrmascXomQ_zS4i4eR7cLnxSbi-GXMbG9DXTfTTCI,3213
+python_socketio-5.11.3.dist-info/RECORD,,
+python_socketio-5.11.3.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
+python_socketio-5.11.3.dist-info/top_level.txt,sha256=xWd-HVUanhys_VzQQTRTRZBX8W448ayFytYf1Zffivs,9
+socketio/__init__.py,sha256=DXxtwPIqHFIqV4BGTgJ86OvCXD6Mth3PxBYhFoJ1_7g,1269
+socketio/__pycache__/__init__.cpython-312.pyc,,
+socketio/__pycache__/admin.cpython-312.pyc,,
+socketio/__pycache__/asgi.cpython-312.pyc,,
+socketio/__pycache__/async_admin.cpython-312.pyc,,
+socketio/__pycache__/async_aiopika_manager.cpython-312.pyc,,
+socketio/__pycache__/async_client.cpython-312.pyc,,
+socketio/__pycache__/async_manager.cpython-312.pyc,,
+socketio/__pycache__/async_namespace.cpython-312.pyc,,
+socketio/__pycache__/async_pubsub_manager.cpython-312.pyc,,
+socketio/__pycache__/async_redis_manager.cpython-312.pyc,,
+socketio/__pycache__/async_server.cpython-312.pyc,,
+socketio/__pycache__/async_simple_client.cpython-312.pyc,,
+socketio/__pycache__/base_client.cpython-312.pyc,,
+socketio/__pycache__/base_manager.cpython-312.pyc,,
+socketio/__pycache__/base_namespace.cpython-312.pyc,,
+socketio/__pycache__/base_server.cpython-312.pyc,,
+socketio/__pycache__/client.cpython-312.pyc,,
+socketio/__pycache__/exceptions.cpython-312.pyc,,
+socketio/__pycache__/kafka_manager.cpython-312.pyc,,
+socketio/__pycache__/kombu_manager.cpython-312.pyc,,
+socketio/__pycache__/manager.cpython-312.pyc,,
+socketio/__pycache__/middleware.cpython-312.pyc,,
+socketio/__pycache__/msgpack_packet.cpython-312.pyc,,
+socketio/__pycache__/namespace.cpython-312.pyc,,
+socketio/__pycache__/packet.cpython-312.pyc,,
+socketio/__pycache__/pubsub_manager.cpython-312.pyc,,
+socketio/__pycache__/redis_manager.cpython-312.pyc,,
+socketio/__pycache__/server.cpython-312.pyc,,
+socketio/__pycache__/simple_client.cpython-312.pyc,,
+socketio/__pycache__/tornado.cpython-312.pyc,,
+socketio/__pycache__/zmq_manager.cpython-312.pyc,,
+socketio/admin.py,sha256=9B601337UMTh2NIFzVrlFyImmSEtegJoXYFDlEwGmYk,16473
+socketio/asgi.py,sha256=NaJtYhOswVVcwHU0zcMM5H5TrSzXq9K-CAYaeSNTZRY,2192
+socketio/async_admin.py,sha256=Swn4s154pc7QTWCWNEbSM2psAmhEuvgobIn1Vf7Y0y8,16850
+socketio/async_aiopika_manager.py,sha256=DaBUjGRYaNIsOsk2xNjWylUsz2egmTAFFUiQkV6mNmk,5193
+socketio/async_client.py,sha256=6MSlSIfIS8-bULGKgnjETfqLc3s4UsZsnTRo2H07zxM,26968
+socketio/async_manager.py,sha256=JiKiI01wOKsX6_v4VJpgBvlxYFDnCA3vpA_lGDuA0YI,4468
+socketio/async_namespace.py,sha256=fuIuIQDUL-lXyD9lm70W2QZr1wA_UMkOb-hQzE5P6u4,10616
+socketio/async_pubsub_manager.py,sha256=kN1G7dM4smVqk9OK85mZJ0eqG-5D8JBkvBW8fIQ70Io,11106
+socketio/async_redis_manager.py,sha256=UZXKunvbSk8neRVhGqigQF5S0WwLYTKV0BKondnV_yY,4299
+socketio/async_server.py,sha256=HBp6yT8NPTSOWMw_Nd5G7OQMXtFSjOsJvutI1MkyxaA,35506
+socketio/async_simple_client.py,sha256=Dj2h0iRR1qZ4BhOV6gpzvDM0K5XO4f-vdxmISiREzhQ,8908
+socketio/base_client.py,sha256=bW2zRm7pjAGlB37iV62363zOl1er3z8I4EroCgeA6PQ,11576
+socketio/base_manager.py,sha256=DJLH6IbJpaJ38jCrmIaUQVj8WUKZDYtfzjKIbQslLjw,5768
+socketio/base_namespace.py,sha256=PpBylO78iyOJ6st5IHQeeXTvD7XzUim5MNDzdKjISxU,978
+socketio/base_server.py,sha256=GS62Am34sQr43v5YDTujE8zt00oyP3XjLy9dtSGOAB0,10584
+socketio/client.py,sha256=s5tU1gsxjEhQyTUk0F2e600IEALp3PcCx8vIK9k-y0Q,25471
+socketio/exceptions.py,sha256=c8yKss_oJl-fkL52X_AagyJecL-9Mxlgb5xDRqSz5tA,975
+socketio/kafka_manager.py,sha256=OCUBlntnqAOlqZn7YxxM0E4rt6VLd_b-wJWrVWKRR-A,2419
+socketio/kombu_manager.py,sha256=qFzWOUlsIznNx2IYKMvA6GKrDcG0zle5_G9duanJ3Po,5747
+socketio/manager.py,sha256=wief9dt2R_OpNKDcFr4HAppCDQfc_WTqwQQ6ViCZi4k,3826
+socketio/middleware.py,sha256=P8wOgSzy3YKOcRVI-r3KNKsEejBz_f5p2wdV8ZqW12E,1591
+socketio/msgpack_packet.py,sha256=0K_XXM-OF3SdqOaLN_O5B4a1xHE6N_UhhiaRhQdseNw,514
+socketio/namespace.py,sha256=jkCi7n-bmAXYRdM-dFwKuLSmXqNc7YGF7SUzHGZzaKw,8870
+socketio/packet.py,sha256=j5zm_kcoIZQnWd9m2hQiF34p0nweGecQvMxr90W8Hrs,7077
+socketio/pubsub_manager.py,sha256=ht9dXinzUtiLldxwttih5hoMhQwIw7sBJMtVBpkcbVA,10407
+socketio/redis_manager.py,sha256=KExT3uzACK42g9OuHKvzI7YpfpGGLFMOwsWhgnPkx0Q,4442
+socketio/server.py,sha256=nOao7YVlRHyUeEei6xGCIMONI2VMEAixU5mGqSfc4M0,34291
+socketio/simple_client.py,sha256=tZiX2sAPY66OJTIJPk-PIGQjmnmUxu3RnpgJ0nc1-y8,8326
+socketio/tornado.py,sha256=R82JCqz-E1ibZAQX708h7FX3sguCHQ1OLYpnMag-LY8,295
+socketio/zmq_manager.py,sha256=JsRooJoH0dwOE0QsmDwePmwzsIN0VudsFWweCzScvMs,3545
diff --git a/.venv/Lib/site-packages/python_socketio-5.11.3.dist-info/WHEEL b/.venv/Lib/site-packages/python_socketio-5.11.3.dist-info/WHEEL
new file mode 100644
index 0000000..9086d27
--- /dev/null
+++ b/.venv/Lib/site-packages/python_socketio-5.11.3.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: setuptools (70.1.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/.venv/Lib/site-packages/python_socketio-5.11.3.dist-info/top_level.txt b/.venv/Lib/site-packages/python_socketio-5.11.3.dist-info/top_level.txt
new file mode 100644
index 0000000..b8f5d36
--- /dev/null
+++ b/.venv/Lib/site-packages/python_socketio-5.11.3.dist-info/top_level.txt
@@ -0,0 +1 @@
+socketio
diff --git a/.venv/Lib/site-packages/simple_websocket-1.0.0.dist-info/INSTALLER b/.venv/Lib/site-packages/simple_websocket-1.0.0.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/.venv/Lib/site-packages/simple_websocket-1.0.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/.venv/Lib/site-packages/simple_websocket-1.0.0.dist-info/LICENSE b/.venv/Lib/site-packages/simple_websocket-1.0.0.dist-info/LICENSE
new file mode 100644
index 0000000..264533f
--- /dev/null
+++ b/.venv/Lib/site-packages/simple_websocket-1.0.0.dist-info/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Miguel Grinberg
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/.venv/Lib/site-packages/simple_websocket-1.0.0.dist-info/METADATA b/.venv/Lib/site-packages/simple_websocket-1.0.0.dist-info/METADATA
new file mode 100644
index 0000000..f93782f
--- /dev/null
+++ b/.venv/Lib/site-packages/simple_websocket-1.0.0.dist-info/METADATA
@@ -0,0 +1,33 @@
+Metadata-Version: 2.1
+Name: simple-websocket
+Version: 1.0.0
+Summary: Simple WebSocket server and client for Python
+Home-page: https://github.com/miguelgrinberg/simple-websocket
+Author: Miguel Grinberg
+Author-email: miguel.grinberg@gmail.com
+Project-URL: Bug Tracker, https://github.com/miguelgrinberg/simple-websocket/issues
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.6
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: wsproto
+Provides-Extra: docs
+Requires-Dist: sphinx ; extra == 'docs'
+
+simple-websocket
+================
+
+[Build status](https://github.com/miguelgrinberg/simple-websocket/actions) [codecov](https://codecov.io/gh/miguelgrinberg/simple-websocket)
+
+Simple WebSocket server and client for Python.
+
+## Resources
+
+- [Documentation](http://simple-websocket.readthedocs.io/en/latest/)
+- [PyPI](https://pypi.python.org/pypi/simple-websocket)
+- [Change Log](https://github.com/miguelgrinberg/simple-websocket/blob/main/CHANGES.md)
+
diff --git a/.venv/Lib/site-packages/simple_websocket-1.0.0.dist-info/RECORD b/.venv/Lib/site-packages/simple_websocket-1.0.0.dist-info/RECORD
new file mode 100644
index 0000000..be5e33b
--- /dev/null
+++ b/.venv/Lib/site-packages/simple_websocket-1.0.0.dist-info/RECORD
@@ -0,0 +1,16 @@
+simple_websocket-1.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+simple_websocket-1.0.0.dist-info/LICENSE,sha256=S4q63MXj3SnHGQW4SVKUVpnwp7pB5q-Z6rpG-qvpW7c,1072
+simple_websocket-1.0.0.dist-info/METADATA,sha256=wZQcZ5j94bd65Fp_YWYA06_iNQ3Obd0-R63s496nLmo,1347
+simple_websocket-1.0.0.dist-info/RECORD,,
+simple_websocket-1.0.0.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
+simple_websocket-1.0.0.dist-info/top_level.txt,sha256=gslMtkYd2H3exn9JQxdAgsKBCESZDyTxmukAF9Iz5aA,17
+simple_websocket/__init__.py,sha256=EKakMkVO9vg5WlXjHEJiTwI2emAqs9q22ZxJz9vJ4co,167
+simple_websocket/__pycache__/__init__.cpython-312.pyc,,
+simple_websocket/__pycache__/aiows.cpython-312.pyc,,
+simple_websocket/__pycache__/asgi.cpython-312.pyc,,
+simple_websocket/__pycache__/errors.cpython-312.pyc,,
+simple_websocket/__pycache__/ws.cpython-312.pyc,,
+simple_websocket/aiows.py,sha256=CHIBIAN2cz004S4tPeTLAcQuT9iBgw6-hA0QD_JZD1A,20978
+simple_websocket/asgi.py,sha256=ic2tmrUI-u9vjMNzjqIORc8g7pAsGwFd9YJIjppHHVU,1823
+simple_websocket/errors.py,sha256=5tIEemK-0w5A-6Qyp6imPmn3KI5EjND3R5pRWAiKWF8,583
+simple_websocket/ws.py,sha256=24fXv4pGxAeDEazZr06l7hmzUGcyiBv6TvMPa260JNo,22301
diff --git a/.venv/Lib/site-packages/simple_websocket-1.0.0.dist-info/WHEEL b/.venv/Lib/site-packages/simple_websocket-1.0.0.dist-info/WHEEL
new file mode 100644
index 0000000..7e68873
--- /dev/null
+++ b/.venv/Lib/site-packages/simple_websocket-1.0.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.41.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/.venv/Lib/site-packages/simple_websocket-1.0.0.dist-info/top_level.txt b/.venv/Lib/site-packages/simple_websocket-1.0.0.dist-info/top_level.txt
new file mode 100644
index 0000000..9959339
--- /dev/null
+++ b/.venv/Lib/site-packages/simple_websocket-1.0.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+simple_websocket
diff --git a/.venv/Lib/site-packages/simple_websocket/__init__.py b/.venv/Lib/site-packages/simple_websocket/__init__.py
new file mode 100644
index 0000000..98a5cc6
--- /dev/null
+++ b/.venv/Lib/site-packages/simple_websocket/__init__.py
@@ -0,0 +1,3 @@
+from .ws import Server, Client # noqa: F401
+from .aiows import AioServer, AioClient # noqa: F401
+from .errors import ConnectionError, ConnectionClosed # noqa: F401
diff --git a/.venv/Lib/site-packages/simple_websocket/__pycache__/__init__.cpython-312.pyc b/.venv/Lib/site-packages/simple_websocket/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..25dcdfb
Binary files /dev/null and b/.venv/Lib/site-packages/simple_websocket/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/simple_websocket/__pycache__/aiows.cpython-312.pyc b/.venv/Lib/site-packages/simple_websocket/__pycache__/aiows.cpython-312.pyc
new file mode 100644
index 0000000..6a54f57
Binary files /dev/null and b/.venv/Lib/site-packages/simple_websocket/__pycache__/aiows.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/simple_websocket/__pycache__/asgi.cpython-312.pyc b/.venv/Lib/site-packages/simple_websocket/__pycache__/asgi.cpython-312.pyc
new file mode 100644
index 0000000..d3ffe10
Binary files /dev/null and b/.venv/Lib/site-packages/simple_websocket/__pycache__/asgi.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/simple_websocket/__pycache__/errors.cpython-312.pyc b/.venv/Lib/site-packages/simple_websocket/__pycache__/errors.cpython-312.pyc
new file mode 100644
index 0000000..24bd994
Binary files /dev/null and b/.venv/Lib/site-packages/simple_websocket/__pycache__/errors.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/simple_websocket/__pycache__/ws.cpython-312.pyc b/.venv/Lib/site-packages/simple_websocket/__pycache__/ws.cpython-312.pyc
new file mode 100644
index 0000000..365ea55
Binary files /dev/null and b/.venv/Lib/site-packages/simple_websocket/__pycache__/ws.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/simple_websocket/aiows.py b/.venv/Lib/site-packages/simple_websocket/aiows.py
new file mode 100644
index 0000000..18ce116
--- /dev/null
+++ b/.venv/Lib/site-packages/simple_websocket/aiows.py
@@ -0,0 +1,467 @@
+import asyncio
+import ssl
+from time import time
+from urllib.parse import urlsplit
+
+from wsproto import ConnectionType, WSConnection
+from wsproto.events import (
+ AcceptConnection,
+ RejectConnection,
+ CloseConnection,
+ Message,
+ Request,
+ Ping,
+ Pong,
+ TextMessage,
+ BytesMessage,
+)
+from wsproto.extensions import PerMessageDeflate
+from wsproto.frame_protocol import CloseReason
+from wsproto.utilities import LocalProtocolError
+from .errors import ConnectionError, ConnectionClosed
+
+
+class AioBase:
+ def __init__(self, connection_type=None, receive_bytes=4096,
+ ping_interval=None, max_message_size=None):
+ #: The name of the subprotocol chosen for the WebSocket connection.
+ self.subprotocol = None
+
+ self.connection_type = connection_type
+ self.receive_bytes = receive_bytes
+ self.ping_interval = ping_interval
+ self.max_message_size = max_message_size
+ self.pong_received = True
+ self.input_buffer = []
+ self.incoming_message = None
+ self.incoming_message_len = 0
+ self.connected = False
+ self.is_server = (connection_type == ConnectionType.SERVER)
+ self.close_reason = CloseReason.NO_STATUS_RCVD
+ self.close_message = None
+
+ self.rsock = None
+ self.wsock = None
+ self.event = asyncio.Event()
+ self.ws = None
+ self.task = None
+
+ async def connect(self):
+ self.ws = WSConnection(self.connection_type)
+ await self.handshake()
+
+ if not self.connected: # pragma: no cover
+ raise ConnectionError()
+ self.task = asyncio.create_task(self._task())
+
+ async def handshake(self): # pragma: no cover
+ # to be implemented by subclasses
+ pass
+
+ async def send(self, data):
+ """Send data over the WebSocket connection.
+
+ :param data: The data to send. If ``data`` is of type ``bytes``, then
+ a binary message is sent. Else, the message is sent in
+ text format.
+ """
+ if not self.connected:
+ raise ConnectionClosed(self.close_reason, self.close_message)
+ if isinstance(data, bytes):
+ out_data = self.ws.send(Message(data=data))
+ else:
+ out_data = self.ws.send(TextMessage(data=str(data)))
+ self.wsock.write(out_data)
+
+ async def receive(self, timeout=None):
+ """Receive data over the WebSocket connection.
+
+ :param timeout: Amount of time to wait for the data, in seconds. Set
+ to ``None`` (the default) to wait indefinitely. Set
+ to 0 to read without blocking.
+
+ The data received is returned, as ``bytes`` or ``str``, depending on
+ the type of the incoming message.
+ """
+ while self.connected and not self.input_buffer:
+ try:
+ await asyncio.wait_for(self.event.wait(), timeout=timeout)
+ except asyncio.TimeoutError:
+ return None
+ self.event.clear() # pragma: no cover
+ try:
+ return self.input_buffer.pop(0)
+ except IndexError:
+ pass
+ if not self.connected: # pragma: no cover
+ raise ConnectionClosed(self.close_reason, self.close_message)
+
+ async def close(self, reason=None, message=None):
+ """Close the WebSocket connection.
+
+ :param reason: A numeric status code indicating the reason of the
+ closure, as defined by the WebSocket specification. The
+ default is 1000 (normal closure).
+ :param message: A text message to be sent to the other side.
+ """
+ if not self.connected:
+ raise ConnectionClosed(self.close_reason, self.close_message)
+ out_data = self.ws.send(CloseConnection(
+ reason or CloseReason.NORMAL_CLOSURE, message))
+ try:
+ self.wsock.write(out_data)
+ except BrokenPipeError: # pragma: no cover
+ pass
+ self.connected = False
+
+ def choose_subprotocol(self, request): # pragma: no cover
+ # The method should return the subprotocol to use, or ``None`` if no
+ # subprotocol is chosen. Can be overridden by subclasses that implement
+ # the server-side of the WebSocket protocol.
+ return None
+
+ async def _task(self):
+ next_ping = None
+ if self.ping_interval:
+ next_ping = time() + self.ping_interval
+
+ while self.connected:
+ try:
+ in_data = b''
+ if next_ping:
+ now = time()
+ timed_out = True
+ if next_ping > now:
+ timed_out = False
+ try:
+ in_data = await asyncio.wait_for(
+ self.rsock.read(self.receive_bytes),
+ timeout=next_ping - now)
+ except asyncio.TimeoutError:
+ timed_out = True
+ if timed_out:
+ # we reached the timeout, we have to send a ping
+ if not self.pong_received:
+ await self.close(
+ reason=CloseReason.POLICY_VIOLATION,
+ message='Ping/Pong timeout')
+ break
+ self.pong_received = False
+ self.wsock.write(self.ws.send(Ping()))
+ next_ping = max(now, next_ping) + self.ping_interval
+ continue
+ else:
+ in_data = await self.rsock.read(self.receive_bytes)
+ if len(in_data) == 0:
+ raise OSError()
+ except (OSError, ConnectionResetError): # pragma: no cover
+ self.connected = False
+ self.event.set()
+ break
+
+ self.ws.receive_data(in_data)
+ self.connected = await self._handle_events()
+ self.wsock.close()
+
+ async def _handle_events(self):
+ keep_going = True
+ out_data = b''
+ for event in self.ws.events():
+ try:
+ if isinstance(event, Request):
+ self.subprotocol = self.choose_subprotocol(event)
+ out_data += self.ws.send(AcceptConnection(
+ subprotocol=self.subprotocol,
+ extensions=[PerMessageDeflate()]))
+ elif isinstance(event, CloseConnection):
+ if self.is_server:
+ out_data += self.ws.send(event.response())
+ self.close_reason = event.code
+ self.close_message = event.reason
+ self.connected = False
+ self.event.set()
+ keep_going = False
+ elif isinstance(event, Ping):
+ out_data += self.ws.send(event.response())
+ elif isinstance(event, Pong):
+ self.pong_received = True
+ elif isinstance(event, (TextMessage, BytesMessage)):
+ self.incoming_message_len += len(event.data)
+ if self.max_message_size and \
+ self.incoming_message_len > self.max_message_size:
+ out_data += self.ws.send(CloseConnection(
+ CloseReason.MESSAGE_TOO_BIG, 'Message is too big'))
+ self.event.set()
+ keep_going = False
+ break
+ if self.incoming_message is None:
+ # store message as is first
+ # if it is the first of a group, the message will be
+ # converted to bytearray on arrival of the second
+ # part, since bytearrays are mutable and can be
+ # concatenated more efficiently
+ self.incoming_message = event.data
+ elif isinstance(event, TextMessage):
+ if not isinstance(self.incoming_message, bytearray):
+ # convert to bytearray and append
+ self.incoming_message = bytearray(
+ (self.incoming_message + event.data).encode())
+ else:
+ # append to bytearray
+ self.incoming_message += event.data.encode()
+ else:
+ if not isinstance(self.incoming_message, bytearray):
+ # convert to mutable bytearray and append
+ self.incoming_message = bytearray(
+ self.incoming_message + event.data)
+ else:
+ # append to bytearray
+ self.incoming_message += event.data
+ if not event.message_finished:
+ continue
+ if isinstance(self.incoming_message, (str, bytes)):
+ # single part message
+ self.input_buffer.append(self.incoming_message)
+ elif isinstance(event, TextMessage):
+ # convert multi-part message back to text
+ self.input_buffer.append(
+ self.incoming_message.decode())
+ else:
+ # convert multi-part message back to bytes
+ self.input_buffer.append(bytes(self.incoming_message))
+ self.incoming_message = None
+ self.incoming_message_len = 0
+ self.event.set()
+ else: # pragma: no cover
+ pass
+ except LocalProtocolError: # pragma: no cover
+ out_data = b''
+ self.event.set()
+ keep_going = False
+ if out_data:
+ self.wsock.write(out_data)
+ return keep_going
+
+
+class AioServer(AioBase):
+ """This class implements a WebSocket server.
+
+ Instead of creating an instance of this class directly, use the
+ ``accept()`` class method to create individual instances of the server,
+ each bound to a client request.
+ """
+ def __init__(self, request, subprotocols=None, receive_bytes=4096,
+ ping_interval=None, max_message_size=None):
+ super().__init__(connection_type=ConnectionType.SERVER,
+ receive_bytes=receive_bytes,
+ ping_interval=ping_interval,
+ max_message_size=max_message_size)
+ self.request = request
+ self.headers = {}
+ self.subprotocols = subprotocols or []
+ if isinstance(self.subprotocols, str):
+ self.subprotocols = [self.subprotocols]
+ self.mode = 'unknown'
+
+ @classmethod
+ async def accept(cls, aiohttp=None, asgi=None, sock=None, headers=None,
+ subprotocols=None, receive_bytes=4096, ping_interval=None,
+ max_message_size=None):
+ """Accept a WebSocket connection from a client.
+
+ :param aiohttp: The request object from aiohttp. If this argument is
+ provided, ``asgi``, ``sock`` and ``headers`` must not
+ be set.
+ :param asgi: A (scope, receive, send) tuple from an ASGI request. If
+ this argument is provided, ``aiohttp``, ``sock`` and
+ ``headers`` must not be set.
+ :param sock: A connected socket to use. If this argument is provided,
+ ``aiohttp`` and ``asgi`` must not be set. The ``headers``
+ argument must be set with the incoming request headers.
+ :param headers: A dictionary with the incoming request headers, when
+ ``sock`` is used.
+ :param subprotocols: A list of supported subprotocols, or ``None`` (the
+ default) to disable subprotocol negotiation.
+ :param receive_bytes: The size of the receive buffer, in bytes. The
+ default is 4096.
+ :param ping_interval: Send ping packets to clients at the requested
+ interval in seconds. Set to ``None`` (the
+ default) to disable ping/pong logic. Enable to
+ prevent disconnections when the line is idle for
+ a certain amount of time, or to detect
+ unresponsive clients and disconnect them. A
+ recommended interval is 25 seconds.
+ :param max_message_size: The maximum size allowed for a message, in
+ bytes, or ``None`` for no limit. The default
+ is ``None``.
+ """
+ if aiohttp and (asgi or sock):
+ raise ValueError('aiohttp argument cannot be used with asgi or '
+ 'sock')
+ if asgi and (aiohttp or sock):
+ raise ValueError('asgi argument cannot be used with aiohttp or '
+ 'sock')
+ if asgi: # pragma: no cover
+ from .asgi import WebSocketASGI
+ return await WebSocketASGI.accept(asgi[0], asgi[1], asgi[2],
+ subprotocols=subprotocols)
+
+ ws = cls({'aiohttp': aiohttp, 'sock': sock, 'headers': headers},
+ subprotocols=subprotocols, receive_bytes=receive_bytes,
+ ping_interval=ping_interval,
+ max_message_size=max_message_size)
+ await ws._accept()
+ return ws
+
+ async def _accept(self):
+ if self.request['sock']: # pragma: no cover
+ # custom integration, request is a tuple with (socket, headers)
+ sock = self.request['sock']
+ self.headers = self.request['headers']
+ self.mode = 'custom'
+ elif self.request['aiohttp']:
+ # default implementation, request is an aiohttp request object
+ sock = self.request['aiohttp'].transport.get_extra_info(
+ 'socket').dup()
+ self.headers = self.request['aiohttp'].headers
+ self.mode = 'aiohttp'
+ else: # pragma: no cover
+ raise ValueError('Invalid request')
+ self.rsock, self.wsock = await asyncio.open_connection(sock=sock)
+ await super().connect()
+
+ async def handshake(self):
+ in_data = b'GET / HTTP/1.1\r\n'
+ for header, value in self.headers.items():
+ in_data += f'{header}: {value}\r\n'.encode()
+ in_data += b'\r\n'
+ self.ws.receive_data(in_data)
+ self.connected = await self._handle_events()
+
+ def choose_subprotocol(self, request):
+ """Choose a subprotocol to use for the WebSocket connection.
+
+ The default implementation selects the first protocol requested by the
+ client that is accepted by the server. Subclasses can override this
+ method to implement a different subprotocol negotiation algorithm.
+
+ :param request: A ``Request`` object.
+
+ The method should return the subprotocol to use, or ``None`` if no
+ subprotocol is chosen.
+ """
+ for subprotocol in request.subprotocols:
+ if subprotocol in self.subprotocols:
+ return subprotocol
+ return None
+
+
+class AioClient(AioBase):
+ """This class implements a WebSocket client.
+
+ Instead of creating an instance of this class directly, use the
+ ``connect()`` class method to create an instance that is connected to a
+ server.
+ """
+ def __init__(self, url, subprotocols=None, headers=None,
+ receive_bytes=4096, ping_interval=None, max_message_size=None,
+ ssl_context=None):
+ super().__init__(connection_type=ConnectionType.CLIENT,
+ receive_bytes=receive_bytes,
+ ping_interval=ping_interval,
+ max_message_size=max_message_size)
+ self.url = url
+ self.ssl_context = ssl_context
+ parsed_url = urlsplit(url)
+ self.is_secure = parsed_url.scheme in ['https', 'wss']
+ self.host = parsed_url.hostname
+ self.port = parsed_url.port or (443 if self.is_secure else 80)
+ self.path = parsed_url.path
+ if parsed_url.query:
+ self.path += '?' + parsed_url.query
+ self.subprotocols = subprotocols or []
+ if isinstance(self.subprotocols, str):
+ self.subprotocols = [self.subprotocols]
+
+        self.extra_headers = []
+        if isinstance(headers, dict):
+            for key, value in headers.items():
+                self.extra_headers.append((key, value))
+        elif isinstance(headers, list):
+            self.extra_headers = headers
+
+ @classmethod
+ async def connect(cls, url, subprotocols=None, headers=None,
+ receive_bytes=4096, ping_interval=None,
+ max_message_size=None, ssl_context=None,
+ thread_class=None, event_class=None):
+ """Returns a WebSocket client connection.
+
+ :param url: The connection URL. Both ``ws://`` and ``wss://`` URLs are
+ accepted.
+ :param subprotocols: The name of the subprotocol to use, or a list of
+ subprotocol names in order of preference. Set to
+ ``None`` (the default) to not use a subprotocol.
+ :param headers: A dictionary or list of tuples with additional HTTP
+ headers to send with the connection request. Note that
+ custom headers are not supported by the WebSocket
+ protocol, so the use of this parameter is not
+ recommended.
+ :param receive_bytes: The size of the receive buffer, in bytes. The
+ default is 4096.
+ :param ping_interval: Send ping packets to the server at the requested
+ interval in seconds. Set to ``None`` (the
+ default) to disable ping/pong logic. Enable to
+ prevent disconnections when the line is idle for
+ a certain amount of time, or to detect an
+ unresponsive server and disconnect. A recommended
+ interval is 25 seconds. In general it is
+ preferred to enable ping/pong on the server, and
+ let the client respond with pong (which it does
+ regardless of this setting).
+ :param max_message_size: The maximum size allowed for a message, in
+ bytes, or ``None`` for no limit. The default
+ is ``None``.
+ :param ssl_context: An ``SSLContext`` instance, if a default SSL
+ context isn't sufficient.
+ """
+ ws = cls(url, subprotocols=subprotocols, headers=headers,
+ receive_bytes=receive_bytes, ping_interval=ping_interval,
+ max_message_size=max_message_size, ssl_context=ssl_context)
+ await ws._connect()
+ return ws
+
+ async def _connect(self):
+ if self.is_secure: # pragma: no cover
+ if self.ssl_context is None:
+ self.ssl_context = ssl.create_default_context(
+ purpose=ssl.Purpose.SERVER_AUTH)
+ self.rsock, self.wsock = await asyncio.open_connection(
+ self.host, self.port, ssl=self.ssl_context)
+ await super().connect()
+
+ async def handshake(self):
+ out_data = self.ws.send(Request(host=self.host, target=self.path,
+ subprotocols=self.subprotocols,
+                                        extra_headers=self.extra_headers))
+ self.wsock.write(out_data)
+
+ while True:
+ in_data = await self.rsock.read(self.receive_bytes)
+ self.ws.receive_data(in_data)
+ try:
+ event = next(self.ws.events())
+ except StopIteration: # pragma: no cover
+ pass
+ else: # pragma: no cover
+ break
+ if isinstance(event, RejectConnection): # pragma: no cover
+ raise ConnectionError(event.status_code)
+ elif not isinstance(event, AcceptConnection): # pragma: no cover
+ raise ConnectionError(400)
+ self.subprotocol = event.subprotocol
+ self.connected = True
+
+ async def close(self, reason=None, message=None):
+ await super().close(reason=reason, message=message)
+ self.wsock.close()
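
A short usage sketch based only on the AioClient API defined above; the echo endpoint is hypothetical. receive() returns None on timeout, and ConnectionClosed carries the close reason and message:

    import asyncio

    import simple_websocket

    async def main():
        ws = await simple_websocket.AioClient.connect(
            'ws://localhost:8000/echo', ping_interval=25)
        try:
            await ws.send('hello')
            reply = await ws.receive(timeout=10)  # None if nothing arrives
            print('reply:', reply)
            await ws.close()
        except simple_websocket.ConnectionClosed as exc:
            print('closed:', exc.reason, exc.message)

    asyncio.run(main())
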
diff --git a/.venv/Lib/site-packages/simple_websocket/asgi.py b/.venv/Lib/site-packages/simple_websocket/asgi.py
new file mode 100644
index 0000000..33d4d48
--- /dev/null
+++ b/.venv/Lib/site-packages/simple_websocket/asgi.py
@@ -0,0 +1,50 @@
+from .errors import ConnectionClosed # pragma: no cover
+
+
+class WebSocketASGI: # pragma: no cover
+ def __init__(self, scope, receive, send, subprotocols=None):
+ self._scope = scope
+ self._receive = receive
+ self._send = send
+ self.subprotocols = subprotocols or []
+ self.subprotocol = None
+ self.connected = False
+
+ @classmethod
+ async def accept(cls, scope, receive, send, subprotocols=None):
+ ws = WebSocketASGI(scope, receive, send, subprotocols=subprotocols)
+ await ws._accept()
+ return ws
+
+ async def _accept(self):
+ connect = await self._receive()
+ if connect['type'] != 'websocket.connect':
+ raise ValueError('Expected websocket.connect')
+ for subprotocol in self._scope['subprotocols']:
+ if subprotocol in self.subprotocols:
+ self.subprotocol = subprotocol
+ break
+ await self._send({'type': 'websocket.accept',
+ 'subprotocol': self.subprotocol})
+
+ async def receive(self):
+ message = await self._receive()
+ if message['type'] == 'websocket.disconnect':
+ raise ConnectionClosed()
+ elif message['type'] != 'websocket.receive':
+ raise OSError(32, 'Websocket message type not supported')
+ return message.get('text', message.get('bytes'))
+
+ async def send(self, data):
+ if isinstance(data, str):
+ await self._send({'type': 'websocket.send', 'text': data})
+ else:
+ await self._send({'type': 'websocket.send', 'bytes': data})
+
+ async def close(self):
+ if not self.connected:
+            self.connected = False
+ try:
+ await self._send({'type': 'websocket.close'})
+ except Exception:
+ pass
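
A bare ASGI application wired to WebSocketASGI as implemented above (normally this path is reached through AioServer.accept(asgi=...)); running it under an ASGI server such as uvicorn is assumed:

    from simple_websocket import ConnectionClosed
    from simple_websocket.asgi import WebSocketASGI

    async def app(scope, receive, send):
        if scope['type'] != 'websocket':
            return  # this sketch only handles WebSocket scopes
        ws = await WebSocketASGI.accept(scope, receive, send)
        try:
            while True:
                data = await ws.receive()  # raises ConnectionClosed on disconnect
                await ws.send(data)        # echo str or bytes back
        except ConnectionClosed:
            pass
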
diff --git a/.venv/Lib/site-packages/simple_websocket/errors.py b/.venv/Lib/site-packages/simple_websocket/errors.py
new file mode 100644
index 0000000..535b9d6
--- /dev/null
+++ b/.venv/Lib/site-packages/simple_websocket/errors.py
@@ -0,0 +1,16 @@
+from wsproto.frame_protocol import CloseReason
+
+
+class ConnectionError(RuntimeError): # pragma: no cover
+ """Connection error exception class."""
+ def __init__(self, status_code=None):
+ self.status_code = status_code
+ super().__init__(f'Connection error: {status_code}')
+
+
+class ConnectionClosed(RuntimeError):
+ """Connection closed exception class."""
+ def __init__(self, reason=CloseReason.NO_STATUS_RCVD, message=None):
+ self.reason = reason
+ self.message = message
+ super().__init__(f'Connection closed: {reason} {message or ""}')
diff --git a/.venv/Lib/site-packages/simple_websocket/ws.py b/.venv/Lib/site-packages/simple_websocket/ws.py
new file mode 100644
index 0000000..627f53d
--- /dev/null
+++ b/.venv/Lib/site-packages/simple_websocket/ws.py
@@ -0,0 +1,478 @@
+import selectors
+import socket
+import ssl
+from time import time
+from urllib.parse import urlsplit
+
+from wsproto import ConnectionType, WSConnection
+from wsproto.events import (
+ AcceptConnection,
+ RejectConnection,
+ CloseConnection,
+ Message,
+ Request,
+ Ping,
+ Pong,
+ TextMessage,
+ BytesMessage,
+)
+from wsproto.extensions import PerMessageDeflate
+from wsproto.frame_protocol import CloseReason
+from wsproto.utilities import LocalProtocolError
+from .errors import ConnectionError, ConnectionClosed
+
+
+class Base:
+ def __init__(self, sock=None, connection_type=None, receive_bytes=4096,
+ ping_interval=None, max_message_size=None,
+ thread_class=None, event_class=None, selector_class=None):
+ #: The name of the subprotocol chosen for the WebSocket connection.
+ self.subprotocol = None
+
+ self.sock = sock
+ self.receive_bytes = receive_bytes
+ self.ping_interval = ping_interval
+ self.max_message_size = max_message_size
+ self.pong_received = True
+ self.input_buffer = []
+ self.incoming_message = None
+ self.incoming_message_len = 0
+ self.connected = False
+ self.is_server = (connection_type == ConnectionType.SERVER)
+ self.close_reason = CloseReason.NO_STATUS_RCVD
+ self.close_message = None
+
+ if thread_class is None:
+ import threading
+ thread_class = threading.Thread
+ if event_class is None: # pragma: no branch
+ import threading
+ event_class = threading.Event
+ if selector_class is None:
+ selector_class = selectors.DefaultSelector
+ self.selector_class = selector_class
+ self.event = event_class()
+
+ self.ws = WSConnection(connection_type)
+ self.handshake()
+
+ if not self.connected: # pragma: no cover
+ raise ConnectionError()
+ self.thread = thread_class(target=self._thread)
+ self.thread.name = self.thread.name.replace(
+ '(_thread)', '(simple_websocket.Base._thread)')
+ self.thread.start()
+
+ def handshake(self): # pragma: no cover
+ # to be implemented by subclasses
+ pass
+
+ def send(self, data):
+ """Send data over the WebSocket connection.
+
+ :param data: The data to send. If ``data`` is of type ``bytes``, then
+ a binary message is sent. Else, the message is sent in
+ text format.
+ """
+ if not self.connected:
+ raise ConnectionClosed(self.close_reason, self.close_message)
+ if isinstance(data, bytes):
+ out_data = self.ws.send(Message(data=data))
+ else:
+ out_data = self.ws.send(TextMessage(data=str(data)))
+ self.sock.send(out_data)
+
+ def receive(self, timeout=None):
+ """Receive data over the WebSocket connection.
+
+ :param timeout: Amount of time to wait for the data, in seconds. Set
+ to ``None`` (the default) to wait indefinitely. Set
+ to 0 to read without blocking.
+
+ The data received is returned, as ``bytes`` or ``str``, depending on
+ the type of the incoming message.
+ """
+ while self.connected and not self.input_buffer:
+ if not self.event.wait(timeout=timeout):
+ return None
+ self.event.clear()
+ try:
+ return self.input_buffer.pop(0)
+ except IndexError:
+ pass
+ if not self.connected: # pragma: no cover
+ raise ConnectionClosed(self.close_reason, self.close_message)
+
+ def close(self, reason=None, message=None):
+ """Close the WebSocket connection.
+
+ :param reason: A numeric status code indicating the reason of the
+ closure, as defined by the WebSocket specification. The
+ default is 1000 (normal closure).
+ :param message: A text message to be sent to the other side.
+ """
+ if not self.connected:
+ raise ConnectionClosed(self.close_reason, self.close_message)
+ out_data = self.ws.send(CloseConnection(
+ reason or CloseReason.NORMAL_CLOSURE, message))
+ try:
+ self.sock.send(out_data)
+ except BrokenPipeError: # pragma: no cover
+ pass
+ self.connected = False
+
+ def choose_subprotocol(self, request): # pragma: no cover
+ # The method should return the subprotocol to use, or ``None`` if no
+ # subprotocol is chosen. Can be overridden by subclasses that implement
+ # the server-side of the WebSocket protocol.
+ return None
+
+ def _thread(self):
+ sel = None
+ if self.ping_interval:
+ next_ping = time() + self.ping_interval
+ sel = self.selector_class()
+ sel.register(self.sock, selectors.EVENT_READ, True)
+
+ while self.connected:
+ try:
+ if sel:
+ now = time()
+ if next_ping <= now or not sel.select(next_ping - now):
+ # we reached the timeout, we have to send a ping
+ if not self.pong_received:
+ self.close(reason=CloseReason.POLICY_VIOLATION,
+ message='Ping/Pong timeout')
+ break
+ self.pong_received = False
+ self.sock.send(self.ws.send(Ping()))
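+                        # schedule the next ping from the later of "now" and
+                        # the previous deadline, so a late wakeup does not
+                        # trigger a burst of catch-up pings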
+ next_ping = max(now, next_ping) + self.ping_interval
+ continue
+ in_data = self.sock.recv(self.receive_bytes)
+ if len(in_data) == 0:
+ raise OSError()
+ self.ws.receive_data(in_data)
+ self.connected = self._handle_events()
+ except (OSError, ConnectionResetError): # pragma: no cover
+ self.connected = False
+ self.event.set()
+ break
+        if sel:
+            sel.close()
+ self.sock.close()
+
+ def _handle_events(self):
+ keep_going = True
+ out_data = b''
+ for event in self.ws.events():
+ try:
+ if isinstance(event, Request):
+ self.subprotocol = self.choose_subprotocol(event)
+ out_data += self.ws.send(AcceptConnection(
+ subprotocol=self.subprotocol,
+ extensions=[PerMessageDeflate()]))
+ elif isinstance(event, CloseConnection):
+ if self.is_server:
+ out_data += self.ws.send(event.response())
+ self.close_reason = event.code
+ self.close_message = event.reason
+ self.connected = False
+ self.event.set()
+ keep_going = False
+ elif isinstance(event, Ping):
+ out_data += self.ws.send(event.response())
+ elif isinstance(event, Pong):
+ self.pong_received = True
+ elif isinstance(event, (TextMessage, BytesMessage)):
+ self.incoming_message_len += len(event.data)
+ if self.max_message_size and \
+ self.incoming_message_len > self.max_message_size:
+ out_data += self.ws.send(CloseConnection(
+ CloseReason.MESSAGE_TOO_BIG, 'Message is too big'))
+ self.event.set()
+ keep_going = False
+ break
+ if self.incoming_message is None:
+ # store message as is first
+ # if it is the first of a group, the message will be
+ # converted to bytearray on arrival of the second
+ # part, since bytearrays are mutable and can be
+ # concatenated more efficiently
+ self.incoming_message = event.data
+ elif isinstance(event, TextMessage):
+ if not isinstance(self.incoming_message, bytearray):
+ # convert to bytearray and append
+ self.incoming_message = bytearray(
+ (self.incoming_message + event.data).encode())
+ else:
+ # append to bytearray
+ self.incoming_message += event.data.encode()
+ else:
+ if not isinstance(self.incoming_message, bytearray):
+ # convert to mutable bytearray and append
+ self.incoming_message = bytearray(
+ self.incoming_message + event.data)
+ else:
+ # append to bytearray
+ self.incoming_message += event.data
+ if not event.message_finished:
+ continue
+ if isinstance(self.incoming_message, (str, bytes)):
+ # single part message
+ self.input_buffer.append(self.incoming_message)
+ elif isinstance(event, TextMessage):
+ # convert multi-part message back to text
+ self.input_buffer.append(
+ self.incoming_message.decode())
+ else:
+ # convert multi-part message back to bytes
+ self.input_buffer.append(bytes(self.incoming_message))
+ self.incoming_message = None
+ self.incoming_message_len = 0
+ self.event.set()
+ else: # pragma: no cover
+ pass
+ except LocalProtocolError: # pragma: no cover
+ out_data = b''
+ self.event.set()
+ keep_going = False
+ if out_data:
+ self.sock.send(out_data)
+ return keep_going
+
+
+class Server(Base):
+ """This class implements a WebSocket server.
+
+ Instead of creating an instance of this class directly, use the
+ ``accept()`` class method to create individual instances of the server,
+ each bound to a client request.
+ """
+ def __init__(self, environ, subprotocols=None, receive_bytes=4096,
+ ping_interval=None, max_message_size=None, thread_class=None,
+ event_class=None, selector_class=None):
+ self.environ = environ
+ self.subprotocols = subprotocols or []
+ if isinstance(self.subprotocols, str):
+ self.subprotocols = [self.subprotocols]
+ self.mode = 'unknown'
+ sock = None
+ if 'werkzeug.socket' in environ:
+ # extract socket from Werkzeug's WSGI environment
+ sock = environ.get('werkzeug.socket')
+ self.mode = 'werkzeug'
+ elif 'gunicorn.socket' in environ:
+ # extract socket from Gunicorn WSGI environment
+ sock = environ.get('gunicorn.socket')
+ self.mode = 'gunicorn'
+ elif 'eventlet.input' in environ: # pragma: no cover
+ # extract socket from Eventlet's WSGI environment
+ sock = environ.get('eventlet.input').get_socket()
+ self.mode = 'eventlet'
+ elif environ.get('SERVER_SOFTWARE', '').startswith(
+ 'gevent'): # pragma: no cover
+ # extract socket from Gevent's WSGI environment
+ wsgi_input = environ['wsgi.input']
+ if not hasattr(wsgi_input, 'raw') and hasattr(wsgi_input, 'rfile'):
+ wsgi_input = wsgi_input.rfile
+ if hasattr(wsgi_input, 'raw'):
+ sock = wsgi_input.raw._sock
+ try:
+ sock = sock.dup()
+ except NotImplementedError:
+ pass
+ self.mode = 'gevent'
+ if sock is None:
+ raise RuntimeError('Cannot obtain socket from WSGI environment.')
+ super().__init__(sock, connection_type=ConnectionType.SERVER,
+ receive_bytes=receive_bytes,
+ ping_interval=ping_interval,
+ max_message_size=max_message_size,
+ thread_class=thread_class, event_class=event_class,
+ selector_class=selector_class)
+
+ @classmethod
+ def accept(cls, environ, subprotocols=None, receive_bytes=4096,
+ ping_interval=None, max_message_size=None, thread_class=None,
+ event_class=None, selector_class=None):
+ """Accept a WebSocket connection from a client.
+
+ :param environ: A WSGI ``environ`` dictionary with the request details.
+ Among other things, this class expects to find the
+ low-level network socket for the connection somewhere
+ in this dictionary. Since the WSGI specification does
+ not cover where or how to store this socket, each web
+ server does this in its own different way. Werkzeug,
+ Gunicorn, Eventlet and Gevent are the only web servers
+ that are currently supported.
+ :param subprotocols: A list of supported subprotocols, or ``None`` (the
+ default) to disable subprotocol negotiation.
+ :param receive_bytes: The size of the receive buffer, in bytes. The
+ default is 4096.
+ :param ping_interval: Send ping packets to clients at the requested
+ interval in seconds. Set to ``None`` (the
+ default) to disable ping/pong logic. Enable to
+ prevent disconnections when the line is idle for
+ a certain amount of time, or to detect
+ unresponsive clients and disconnect them. A
+ recommended interval is 25 seconds.
+ :param max_message_size: The maximum size allowed for a message, in
+ bytes, or ``None`` for no limit. The default
+ is ``None``.
+ :param thread_class: The ``Thread`` class to use when creating
+ background threads. The default is the
+ ``threading.Thread`` class from the Python
+ standard library.
+ :param event_class: The ``Event`` class to use when creating event
+                            objects. The default is the ``threading.Event``
+ class from the Python standard library.
+ :param selector_class: The ``Selector`` class to use when creating
+ selectors. The default is the
+ ``selectors.DefaultSelector`` class from the
+ Python standard library.
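+
+        A minimal usage sketch with Flask running under Werkzeug (the route
+        and view function names are illustrative)::
+
+            from flask import Flask, request
+            import simple_websocket
+
+            app = Flask(__name__)
+
+            @app.route('/echo', websocket=True)
+            def echo():
+                ws = simple_websocket.Server.accept(request.environ)
+                try:
+                    while True:
+                        data = ws.receive()
+                        ws.send(data)
+                except simple_websocket.ConnectionClosed:
+                    pass
+                return ''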
+ """
+ return cls(environ, subprotocols=subprotocols,
+ receive_bytes=receive_bytes, ping_interval=ping_interval,
+ max_message_size=max_message_size,
+ thread_class=thread_class, event_class=event_class,
+ selector_class=selector_class)
+
+ def handshake(self):
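+        # reconstruct the client's HTTP upgrade request from the WSGI
+        # environ so that wsproto can parse the handshake headers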
+ in_data = b'GET / HTTP/1.1\r\n'
+ for key, value in self.environ.items():
+ if key.startswith('HTTP_'):
+ header = '-'.join([p.capitalize() for p in key[5:].split('_')])
+ in_data += f'{header}: {value}\r\n'.encode()
+ in_data += b'\r\n'
+ self.ws.receive_data(in_data)
+ self.connected = self._handle_events()
+
+ def choose_subprotocol(self, request):
+ """Choose a subprotocol to use for the WebSocket connection.
+
+ The default implementation selects the first protocol requested by the
+ client that is accepted by the server. Subclasses can override this
+ method to implement a different subprotocol negotiation algorithm.
+
+ :param request: A ``Request`` object.
+
+ The method should return the subprotocol to use, or ``None`` if no
+ subprotocol is chosen.
+ """
+ for subprotocol in request.subprotocols:
+ if subprotocol in self.subprotocols:
+ return subprotocol
+ return None
+
+
+class Client(Base):
+ """This class implements a WebSocket client.
+
+ Instead of creating an instance of this class directly, use the
+ ``connect()`` class method to create an instance that is connected to a
+ server.
+ """
+ def __init__(self, url, subprotocols=None, headers=None,
+ receive_bytes=4096, ping_interval=None, max_message_size=None,
+ ssl_context=None, thread_class=None, event_class=None):
+ parsed_url = urlsplit(url)
+ is_secure = parsed_url.scheme in ['https', 'wss']
+ self.host = parsed_url.hostname
+ self.port = parsed_url.port or (443 if is_secure else 80)
+ self.path = parsed_url.path
+ if parsed_url.query:
+ self.path += '?' + parsed_url.query
+ self.subprotocols = subprotocols or []
+ if isinstance(self.subprotocols, str):
+ self.subprotocols = [self.subprotocols]
+
+        self.extra_headers = []
+        if isinstance(headers, dict):
+            for key, value in headers.items():
+                self.extra_headers.append((key, value))
+        elif isinstance(headers, list):
+            self.extra_headers = headers
+
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ if is_secure: # pragma: no cover
+ if ssl_context is None:
+ ssl_context = ssl.create_default_context(
+ purpose=ssl.Purpose.SERVER_AUTH)
+ sock = ssl_context.wrap_socket(sock, server_hostname=self.host)
+ sock.connect((self.host, self.port))
+ super().__init__(sock, connection_type=ConnectionType.CLIENT,
+ receive_bytes=receive_bytes,
+ ping_interval=ping_interval,
+ max_message_size=max_message_size,
+ thread_class=thread_class, event_class=event_class)
+
+ @classmethod
+ def connect(cls, url, subprotocols=None, headers=None,
+ receive_bytes=4096, ping_interval=None, max_message_size=None,
+ ssl_context=None, thread_class=None, event_class=None):
+ """Returns a WebSocket client connection.
+
+ :param url: The connection URL. Both ``ws://`` and ``wss://`` URLs are
+ accepted.
+ :param subprotocols: The name of the subprotocol to use, or a list of
+ subprotocol names in order of preference. Set to
+ ``None`` (the default) to not use a subprotocol.
+ :param headers: A dictionary or list of tuples with additional HTTP
+ headers to send with the connection request. Note that
+ custom headers are not supported by the WebSocket
+ protocol, so the use of this parameter is not
+ recommended.
+ :param receive_bytes: The size of the receive buffer, in bytes. The
+ default is 4096.
+ :param ping_interval: Send ping packets to the server at the requested
+ interval in seconds. Set to ``None`` (the
+ default) to disable ping/pong logic. Enable to
+ prevent disconnections when the line is idle for
+ a certain amount of time, or to detect an
+ unresponsive server and disconnect. A recommended
+ interval is 25 seconds. In general it is
+ preferred to enable ping/pong on the server, and
+ let the client respond with pong (which it does
+ regardless of this setting).
+ :param max_message_size: The maximum size allowed for a message, in
+ bytes, or ``None`` for no limit. The default
+ is ``None``.
+ :param ssl_context: An ``SSLContext`` instance, if a default SSL
+ context isn't sufficient.
+ :param thread_class: The ``Thread`` class to use when creating
+ background threads. The default is the
+ ``threading.Thread`` class from the Python
+ standard library.
+ :param event_class: The ``Event`` class to use when creating event
+                            objects. The default is the ``threading.Event``
+ class from the Python standard library.
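+
+        A minimal usage sketch (the URL is illustrative)::
+
+            import simple_websocket
+
+            ws = simple_websocket.Client.connect('ws://localhost:5000/echo')
+            ws.send('hello')
+            print(ws.receive())
+            ws.close()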
+ """
+ return cls(url, subprotocols=subprotocols, headers=headers,
+ receive_bytes=receive_bytes, ping_interval=ping_interval,
+ max_message_size=max_message_size, ssl_context=ssl_context,
+ thread_class=thread_class, event_class=event_class)
+
+ def handshake(self):
+ out_data = self.ws.send(Request(host=self.host, target=self.path,
+ subprotocols=self.subprotocols,
+                                        extra_headers=self.extra_headers))
+ self.sock.send(out_data)
+
+ while True:
+ in_data = self.sock.recv(self.receive_bytes)
+ self.ws.receive_data(in_data)
+ try:
+ event = next(self.ws.events())
+ except StopIteration: # pragma: no cover
+ pass
+ else: # pragma: no cover
+ break
+ if isinstance(event, RejectConnection): # pragma: no cover
+ raise ConnectionError(event.status_code)
+ elif not isinstance(event, AcceptConnection): # pragma: no cover
+ raise ConnectionError(400)
+ self.subprotocol = event.subprotocol
+ self.connected = True
+
+ def close(self, reason=None, message=None):
+ super().close(reason=reason, message=message)
+ self.sock.close()
diff --git a/.venv/Lib/site-packages/socketio/__init__.py b/.venv/Lib/site-packages/socketio/__init__.py
new file mode 100644
index 0000000..95642f4
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/__init__.py
@@ -0,0 +1,28 @@
+from .client import Client
+from .simple_client import SimpleClient
+from .manager import Manager
+from .pubsub_manager import PubSubManager
+from .kombu_manager import KombuManager
+from .redis_manager import RedisManager
+from .kafka_manager import KafkaManager
+from .zmq_manager import ZmqManager
+from .server import Server
+from .namespace import Namespace, ClientNamespace
+from .middleware import WSGIApp, Middleware
+from .tornado import get_tornado_handler
+from .async_client import AsyncClient
+from .async_simple_client import AsyncSimpleClient
+from .async_server import AsyncServer
+from .async_manager import AsyncManager
+from .async_namespace import AsyncNamespace, AsyncClientNamespace
+from .async_redis_manager import AsyncRedisManager
+from .async_aiopika_manager import AsyncAioPikaManager
+from .asgi import ASGIApp
+
+__all__ = ['SimpleClient', 'Client', 'Server', 'Manager', 'PubSubManager',
+ 'KombuManager', 'RedisManager', 'ZmqManager', 'KafkaManager',
+ 'Namespace', 'ClientNamespace', 'WSGIApp', 'Middleware',
+ 'AsyncSimpleClient', 'AsyncClient', 'AsyncServer',
+ 'AsyncNamespace', 'AsyncClientNamespace', 'AsyncManager',
+ 'AsyncRedisManager', 'ASGIApp', 'get_tornado_handler',
+ 'AsyncAioPikaManager']
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/__init__.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..eaa1f9f
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/admin.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/admin.cpython-312.pyc
new file mode 100644
index 0000000..ea7119c
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/admin.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/asgi.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/asgi.cpython-312.pyc
new file mode 100644
index 0000000..0b4519d
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/asgi.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/async_admin.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/async_admin.cpython-312.pyc
new file mode 100644
index 0000000..dd698e4
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/async_admin.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/async_aiopika_manager.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/async_aiopika_manager.cpython-312.pyc
new file mode 100644
index 0000000..7e11c1f
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/async_aiopika_manager.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/async_client.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/async_client.cpython-312.pyc
new file mode 100644
index 0000000..53eccc0
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/async_client.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/async_manager.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/async_manager.cpython-312.pyc
new file mode 100644
index 0000000..37248ed
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/async_manager.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/async_namespace.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/async_namespace.cpython-312.pyc
new file mode 100644
index 0000000..efebefc
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/async_namespace.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/async_pubsub_manager.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/async_pubsub_manager.cpython-312.pyc
new file mode 100644
index 0000000..8b04a2b
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/async_pubsub_manager.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/async_redis_manager.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/async_redis_manager.cpython-312.pyc
new file mode 100644
index 0000000..2d70290
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/async_redis_manager.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/async_server.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/async_server.cpython-312.pyc
new file mode 100644
index 0000000..73e1e20
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/async_server.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/async_simple_client.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/async_simple_client.cpython-312.pyc
new file mode 100644
index 0000000..90f1697
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/async_simple_client.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/base_client.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/base_client.cpython-312.pyc
new file mode 100644
index 0000000..7350f78
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/base_client.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/base_manager.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/base_manager.cpython-312.pyc
new file mode 100644
index 0000000..bb88904
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/base_manager.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/base_namespace.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/base_namespace.cpython-312.pyc
new file mode 100644
index 0000000..8fe7946
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/base_namespace.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/base_server.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/base_server.cpython-312.pyc
new file mode 100644
index 0000000..d548762
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/base_server.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/client.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/client.cpython-312.pyc
new file mode 100644
index 0000000..469d989
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/client.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/exceptions.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/exceptions.cpython-312.pyc
new file mode 100644
index 0000000..ce90d41
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/exceptions.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/kafka_manager.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/kafka_manager.cpython-312.pyc
new file mode 100644
index 0000000..9f30803
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/kafka_manager.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/kombu_manager.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/kombu_manager.cpython-312.pyc
new file mode 100644
index 0000000..6f5bb24
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/kombu_manager.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/manager.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/manager.cpython-312.pyc
new file mode 100644
index 0000000..d141940
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/manager.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/middleware.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/middleware.cpython-312.pyc
new file mode 100644
index 0000000..8118814
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/middleware.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/msgpack_packet.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/msgpack_packet.cpython-312.pyc
new file mode 100644
index 0000000..585a6a7
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/msgpack_packet.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/namespace.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/namespace.cpython-312.pyc
new file mode 100644
index 0000000..c3bf042
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/namespace.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/packet.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/packet.cpython-312.pyc
new file mode 100644
index 0000000..9bbd675
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/packet.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/pubsub_manager.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/pubsub_manager.cpython-312.pyc
new file mode 100644
index 0000000..df67223
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/pubsub_manager.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/redis_manager.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/redis_manager.cpython-312.pyc
new file mode 100644
index 0000000..2b9595a
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/redis_manager.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/server.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/server.cpython-312.pyc
new file mode 100644
index 0000000..eb3020f
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/server.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/simple_client.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/simple_client.cpython-312.pyc
new file mode 100644
index 0000000..e8a5c93
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/simple_client.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/tornado.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/tornado.cpython-312.pyc
new file mode 100644
index 0000000..b8194fe
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/tornado.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/__pycache__/zmq_manager.cpython-312.pyc b/.venv/Lib/site-packages/socketio/__pycache__/zmq_manager.cpython-312.pyc
new file mode 100644
index 0000000..f27a89e
Binary files /dev/null and b/.venv/Lib/site-packages/socketio/__pycache__/zmq_manager.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/socketio/admin.py b/.venv/Lib/site-packages/socketio/admin.py
new file mode 100644
index 0000000..f317ea2
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/admin.py
@@ -0,0 +1,405 @@
+from datetime import datetime
+import functools
+import os
+import socket
+import time
+from urllib.parse import parse_qs
+from .exceptions import ConnectionRefusedError
+
+HOSTNAME = socket.gethostname()
+PID = os.getpid()
+
+
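+# Aggregates admin event counts into one bucket per (second, event type)
+# pair; the stats task drains these buckets with get_and_clear() and sends
+# them to the admin UI as "aggregatedEvents".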
+class EventBuffer:
+ def __init__(self):
+ self.buffer = {}
+
+ def push(self, type, count=1):
+ timestamp = int(time.time()) * 1000
+ key = '{};{}'.format(timestamp, type)
+ if key not in self.buffer:
+ self.buffer[key] = {
+ 'timestamp': timestamp,
+ 'type': type,
+ 'count': count,
+ }
+ else:
+ self.buffer[key]['count'] += count
+
+ def get_and_clear(self):
+ buffer = self.buffer
+ self.buffer = {}
+        return list(buffer.values())
+
+
+class InstrumentedServer:
+ def __init__(self, sio, auth=None, mode='development', read_only=False,
+ server_id=None, namespace='/admin', server_stats_interval=2):
+ """Instrument the Socket.IO server for monitoring with the `Socket.IO
+        Admin UI <https://socket.io/docs/v4/admin-ui/>`_.
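+
+        A typical way to create this instance is through the server's
+        ``instrument()`` helper rather than directly (the credentials below
+        are illustrative)::
+
+            import socketio
+
+            sio = socketio.Server()
+            sio.instrument(auth={'username': 'admin', 'password': 'secret'})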
+ """
+ if auth is None:
+ raise ValueError('auth must be specified')
+ self.sio = sio
+ self.auth = auth
+ self.admin_namespace = namespace
+ self.read_only = read_only
+ self.server_id = server_id or (
+ self.sio.manager.host_id if hasattr(self.sio.manager, 'host_id')
+ else HOSTNAME
+ )
+ self.mode = mode
+ self.server_stats_interval = server_stats_interval
+ self.event_buffer = EventBuffer()
+
+        # task that emits "server_stats" every server_stats_interval seconds
+        # (2 by default)
+ self.stop_stats_event = None
+ self.stats_task = None
+
+ # monkey-patch the server to report metrics to the admin UI
+ self.instrument()
+
+ def instrument(self):
+ self.sio.on('connect', self.admin_connect,
+ namespace=self.admin_namespace)
+
+ if self.mode == 'development':
+ if not self.read_only: # pragma: no branch
+ self.sio.on('emit', self.admin_emit,
+ namespace=self.admin_namespace)
+ self.sio.on('join', self.admin_enter_room,
+ namespace=self.admin_namespace)
+ self.sio.on('leave', self.admin_leave_room,
+ namespace=self.admin_namespace)
+ self.sio.on('_disconnect', self.admin_disconnect,
+ namespace=self.admin_namespace)
+
+ # track socket connection times
+ self.sio.manager._timestamps = {}
+
+ # report socket.io connections
+ self.sio.manager.__connect = self.sio.manager.connect
+ self.sio.manager.connect = self._connect
+
+ # report socket.io disconnection
+ self.sio.manager.__disconnect = self.sio.manager.disconnect
+ self.sio.manager.disconnect = self._disconnect
+
+ # report join rooms
+ self.sio.manager.__basic_enter_room = \
+ self.sio.manager.basic_enter_room
+ self.sio.manager.basic_enter_room = self._basic_enter_room
+
+ # report leave rooms
+ self.sio.manager.__basic_leave_room = \
+ self.sio.manager.basic_leave_room
+ self.sio.manager.basic_leave_room = self._basic_leave_room
+
+ # report emit events
+ self.sio.manager.__emit = self.sio.manager.emit
+ self.sio.manager.emit = self._emit
+
+ # report receive events
+ self.sio.__handle_event_internal = self.sio._handle_event_internal
+ self.sio._handle_event_internal = self._handle_event_internal
+
+ # report engine.io connections
+ self.sio.eio.on('connect', self._handle_eio_connect)
+ self.sio.eio.on('disconnect', self._handle_eio_disconnect)
+
+ # report polling packets
+ from engineio.socket import Socket
+ self.sio.eio.__ok = self.sio.eio._ok
+ self.sio.eio._ok = self._eio_http_response
+ Socket.__handle_post_request = Socket.handle_post_request
+ Socket.handle_post_request = functools.partialmethod(
+ self.__class__._eio_handle_post_request, self)
+
+ # report websocket packets
+ Socket.__websocket_handler = Socket._websocket_handler
+ Socket._websocket_handler = functools.partialmethod(
+ self.__class__._eio_websocket_handler, self)
+
+ # report connected sockets with each ping
+ if self.mode == 'development':
+ Socket.__send_ping = Socket._send_ping
+ Socket._send_ping = functools.partialmethod(
+ self.__class__._eio_send_ping, self)
+
+ def uninstrument(self): # pragma: no cover
+ if self.mode == 'development':
+ self.sio.manager.connect = self.sio.manager.__connect
+ self.sio.manager.disconnect = self.sio.manager.__disconnect
+ self.sio.manager.basic_enter_room = \
+ self.sio.manager.__basic_enter_room
+ self.sio.manager.basic_leave_room = \
+ self.sio.manager.__basic_leave_room
+ self.sio.manager.emit = self.sio.manager.__emit
+ self.sio._handle_event_internal = self.sio.__handle_event_internal
+ self.sio.eio._ok = self.sio.eio.__ok
+
+ from engineio.socket import Socket
+ Socket.handle_post_request = Socket.__handle_post_request
+ Socket._websocket_handler = Socket.__websocket_handler
+ if self.mode == 'development':
+ Socket._send_ping = Socket.__send_ping
+
+ def admin_connect(self, sid, environ, client_auth):
+ if self.auth:
+ authenticated = False
+ if isinstance(self.auth, dict):
+ authenticated = client_auth == self.auth
+ elif isinstance(self.auth, list):
+ authenticated = client_auth in self.auth
+ else:
+ authenticated = self.auth(client_auth)
+ if not authenticated:
+ raise ConnectionRefusedError('authentication failed')
+
+ def config(sid):
+ self.sio.sleep(0.1)
+
+ # supported features
+ features = ['AGGREGATED_EVENTS']
+ if not self.read_only:
+ features += ['EMIT', 'JOIN', 'LEAVE', 'DISCONNECT', 'MJOIN',
+ 'MLEAVE', 'MDISCONNECT']
+ if self.mode == 'development':
+ features.append('ALL_EVENTS')
+ self.sio.emit('config', {'supportedFeatures': features},
+ to=sid, namespace=self.admin_namespace)
+
+ # send current sockets
+ if self.mode == 'development':
+ all_sockets = []
+ for nsp in self.sio.manager.get_namespaces():
+ for sid, eio_sid in self.sio.manager.get_participants(
+ nsp, None):
+ all_sockets.append(
+ self.serialize_socket(sid, nsp, eio_sid))
+ self.sio.emit('all_sockets', all_sockets, to=sid,
+ namespace=self.admin_namespace)
+
+ self.sio.start_background_task(config, sid)
+
+ def admin_emit(self, _, namespace, room_filter, event, *data):
+ self.sio.emit(event, data, to=room_filter, namespace=namespace)
+
+ def admin_enter_room(self, _, namespace, room, room_filter=None):
+ for sid, _ in self.sio.manager.get_participants(
+ namespace, room_filter):
+ self.sio.enter_room(sid, room, namespace=namespace)
+
+ def admin_leave_room(self, _, namespace, room, room_filter=None):
+ for sid, _ in self.sio.manager.get_participants(
+ namespace, room_filter):
+ self.sio.leave_room(sid, room, namespace=namespace)
+
+ def admin_disconnect(self, _, namespace, close, room_filter=None):
+ for sid, _ in self.sio.manager.get_participants(
+ namespace, room_filter):
+ self.sio.disconnect(sid, namespace=namespace)
+
+ def shutdown(self):
+ if self.stats_task: # pragma: no branch
+ self.stop_stats_event.set()
+ self.stats_task.join()
+
+ def _connect(self, eio_sid, namespace):
+ sid = self.sio.manager.__connect(eio_sid, namespace)
+ t = time.time()
+ self.sio.manager._timestamps[sid] = t
+ serialized_socket = self.serialize_socket(sid, namespace, eio_sid)
+ self.sio.emit('socket_connected', (
+ serialized_socket,
+ datetime.utcfromtimestamp(t).isoformat() + 'Z',
+ ), namespace=self.admin_namespace)
+ return sid
+
+ def _disconnect(self, sid, namespace, **kwargs):
+ del self.sio.manager._timestamps[sid]
+ self.sio.emit('socket_disconnected', (
+ namespace,
+ sid,
+ 'N/A',
+ datetime.utcnow().isoformat() + 'Z',
+ ), namespace=self.admin_namespace)
+ return self.sio.manager.__disconnect(sid, namespace, **kwargs)
+
+ def _check_for_upgrade(self, eio_sid, sid, namespace): # pragma: no cover
+ for _ in range(5):
+ self.sio.sleep(5)
+ try:
+ if self.sio.eio._get_socket(eio_sid).upgraded:
+ self.sio.emit('socket_updated', {
+ 'id': sid,
+ 'nsp': namespace,
+ 'transport': 'websocket',
+ }, namespace=self.admin_namespace)
+ break
+ except KeyError:
+ pass
+
+ def _basic_enter_room(self, sid, namespace, room, eio_sid=None):
+ ret = self.sio.manager.__basic_enter_room(sid, namespace, room,
+ eio_sid)
+ if room:
+ self.sio.emit('room_joined', (
+ namespace,
+ room,
+ sid,
+ datetime.utcnow().isoformat() + 'Z',
+ ), namespace=self.admin_namespace)
+ return ret
+
+ def _basic_leave_room(self, sid, namespace, room):
+ if room:
+ self.sio.emit('room_left', (
+ namespace,
+ room,
+ sid,
+ datetime.utcnow().isoformat() + 'Z',
+ ), namespace=self.admin_namespace)
+ return self.sio.manager.__basic_leave_room(sid, namespace, room)
+
+ def _emit(self, event, data, namespace, room=None, skip_sid=None,
+ callback=None, **kwargs):
+ ret = self.sio.manager.__emit(event, data, namespace, room=room,
+ skip_sid=skip_sid, callback=callback,
+ **kwargs)
+ if namespace != self.admin_namespace:
+ event_data = [event] + list(data) if isinstance(data, tuple) \
+ else [data]
+ if not isinstance(skip_sid, list): # pragma: no branch
+ skip_sid = [skip_sid]
+ for sid, _ in self.sio.manager.get_participants(namespace, room):
+ if sid not in skip_sid:
+ self.sio.emit('event_sent', (
+ namespace,
+ sid,
+ event_data,
+ datetime.utcnow().isoformat() + 'Z',
+ ), namespace=self.admin_namespace)
+ return ret
+
+ def _handle_event_internal(self, server, sid, eio_sid, data, namespace,
+ id):
+ ret = self.sio.__handle_event_internal(server, sid, eio_sid, data,
+ namespace, id)
+ self.sio.emit('event_received', (
+ namespace,
+ sid,
+ data,
+ datetime.utcnow().isoformat() + 'Z',
+ ), namespace=self.admin_namespace)
+ return ret
+
+ def _handle_eio_connect(self, eio_sid, environ):
+ if self.stop_stats_event is None:
+ self.stop_stats_event = self.sio.eio.create_event()
+ self.stats_task = self.sio.start_background_task(
+ self._emit_server_stats)
+
+ self.event_buffer.push('rawConnection')
+ return self.sio._handle_eio_connect(eio_sid, environ)
+
+ def _handle_eio_disconnect(self, eio_sid):
+ self.event_buffer.push('rawDisconnection')
+ return self.sio._handle_eio_disconnect(eio_sid)
+
+ def _eio_http_response(self, packets=None, headers=None, jsonp_index=None):
+ ret = self.sio.eio.__ok(packets=packets, headers=headers,
+ jsonp_index=jsonp_index)
+ self.event_buffer.push('packetsOut')
+ self.event_buffer.push('bytesOut', len(ret['response']))
+ return ret
+
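+    # Note: the following methods are attached to the engine.io Socket class
+    # with functools.partialmethod, so ``socket`` is the Socket instance and
+    # ``self`` is the InstrumentedServer.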
+ def _eio_handle_post_request(socket, self, environ):
+ ret = socket.__handle_post_request(environ)
+ self.event_buffer.push('packetsIn')
+ self.event_buffer.push(
+ 'bytesIn', int(environ.get('CONTENT_LENGTH', 0)))
+ return ret
+
+ def _eio_websocket_handler(socket, self, ws):
+ def _send(ws, data, *args, **kwargs):
+ self.event_buffer.push('packetsOut')
+ self.event_buffer.push('bytesOut', len(data))
+ return ws.__send(data, *args, **kwargs)
+
+ def _wait(ws):
+ ret = ws.__wait()
+ self.event_buffer.push('packetsIn')
+ self.event_buffer.push('bytesIn', len(ret or ''))
+ return ret
+
+ ws.__send = ws.send
+ ws.send = functools.partial(_send, ws)
+ ws.__wait = ws.wait
+ ws.wait = functools.partial(_wait, ws)
+ return socket.__websocket_handler(ws)
+
+ def _eio_send_ping(socket, self): # pragma: no cover
+ eio_sid = socket.sid
+ t = time.time()
+ for namespace in self.sio.manager.get_namespaces():
+ sid = self.sio.manager.sid_from_eio_sid(eio_sid, namespace)
+ if sid:
+ serialized_socket = self.serialize_socket(sid, namespace,
+ eio_sid)
+ self.sio.emit('socket_connected', (
+ serialized_socket,
+ datetime.utcfromtimestamp(t).isoformat() + 'Z',
+ ), namespace=self.admin_namespace)
+ return socket.__send_ping()
+
+ def _emit_server_stats(self):
+ start_time = time.time()
+ namespaces = list(self.sio.handlers.keys())
+ namespaces.sort()
+ while not self.stop_stats_event.is_set():
+ self.sio.sleep(self.server_stats_interval)
+ self.sio.emit('server_stats', {
+ 'serverId': self.server_id,
+ 'hostname': HOSTNAME,
+ 'pid': PID,
+ 'uptime': time.time() - start_time,
+ 'clientsCount': len(self.sio.eio.sockets),
+ 'pollingClientsCount': len(
+ [s for s in self.sio.eio.sockets.values()
+ if not s.upgraded]),
+ 'aggregatedEvents': self.event_buffer.get_and_clear(),
+ 'namespaces': [{
+ 'name': nsp,
+ 'socketsCount': len(self.sio.manager.rooms.get(
+ nsp, {None: []}).get(None, []))
+ } for nsp in namespaces],
+ }, namespace=self.admin_namespace)
+
+ def serialize_socket(self, sid, namespace, eio_sid=None):
+ if eio_sid is None: # pragma: no cover
+ eio_sid = self.sio.manager.eio_sid_from_sid(sid)
+ socket = self.sio.eio._get_socket(eio_sid)
+ environ = self.sio.environ.get(eio_sid, {})
+        tm = self.sio.manager._timestamps.get(sid, 0)
+ return {
+ 'id': sid,
+ 'clientId': eio_sid,
+ 'transport': 'websocket' if socket.upgraded else 'polling',
+ 'nsp': namespace,
+ 'data': {},
+ 'handshake': {
+ 'address': environ.get('REMOTE_ADDR', ''),
+ 'headers': {k[5:].lower(): v for k, v in environ.items()
+ if k.startswith('HTTP_')},
+ 'query': {k: v[0] if len(v) == 1 else v for k, v in parse_qs(
+ environ.get('QUERY_STRING', '')).items()},
+ 'secure': environ.get('wsgi.url_scheme', '') == 'https',
+ 'url': environ.get('PATH_INFO', ''),
+ 'issued': tm * 1000,
+ 'time': datetime.utcfromtimestamp(tm).isoformat() + 'Z'
+ if tm else '',
+ },
+ 'rooms': self.sio.manager.get_rooms(sid, namespace),
+ }
diff --git a/.venv/Lib/site-packages/socketio/asgi.py b/.venv/Lib/site-packages/socketio/asgi.py
new file mode 100644
index 0000000..23b094d
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/asgi.py
@@ -0,0 +1,47 @@
+import engineio
+
+
+class ASGIApp(engineio.ASGIApp): # pragma: no cover
+ """ASGI application middleware for Socket.IO.
+
+    This middleware dispatches traffic to a Socket.IO application. It can
+ also serve a list of static files to the client, or forward unrelated
+ HTTP traffic to another ASGI application.
+
+ :param socketio_server: The Socket.IO server. Must be an instance of the
+ ``socketio.AsyncServer`` class.
+ :param static_files: A dictionary with static file mapping rules. See the
+ documentation for details on this argument.
+ :param other_asgi_app: A separate ASGI app that receives all other traffic.
+ :param socketio_path: The endpoint where the Socket.IO application should
+ be installed. The default value is appropriate for
+ most cases. With a value of ``None``, all incoming
+ traffic is directed to the Socket.IO server, with the
+ assumption that routing, if necessary, is handled by
+ a different layer. When this option is set to
+ ``None``, ``static_files`` and ``other_asgi_app`` are
+ ignored.
+    :param on_startup: function to be called on application startup; can be a
+                       coroutine
+    :param on_shutdown: function to be called on application shutdown; can be
+                        a coroutine
+
+ Example usage::
+
+ import socketio
+ import uvicorn
+
+ sio = socketio.AsyncServer()
+ app = socketio.ASGIApp(sio, static_files={
+ '/': 'index.html',
+ '/static': './public',
+ })
+ uvicorn.run(app, host='127.0.0.1', port=5000)
+ """
+ def __init__(self, socketio_server, other_asgi_app=None,
+ static_files=None, socketio_path='socket.io',
+ on_startup=None, on_shutdown=None):
+ super().__init__(socketio_server, other_asgi_app,
+ static_files=static_files,
+ engineio_path=socketio_path, on_startup=on_startup,
+ on_shutdown=on_shutdown)
diff --git a/.venv/Lib/site-packages/socketio/async_admin.py b/.venv/Lib/site-packages/socketio/async_admin.py
new file mode 100644
index 0000000..162c566
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/async_admin.py
@@ -0,0 +1,398 @@
+import asyncio
+from datetime import datetime
+import functools
+import os
+import socket
+import time
+from urllib.parse import parse_qs
+from .admin import EventBuffer
+from .exceptions import ConnectionRefusedError
+
+HOSTNAME = socket.gethostname()
+PID = os.getpid()
+
+
+class InstrumentedAsyncServer:
+ def __init__(self, sio, auth=None, namespace='/admin', read_only=False,
+ server_id=None, mode='development', server_stats_interval=2):
+ """Instrument the Socket.IO server for monitoring with the `Socket.IO
+        Admin UI <https://socket.io/docs/v4/admin-ui/>`_.
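+
+        As with the synchronous server, an instance is normally created
+        through the async server's ``instrument()`` helper (the credentials
+        below are illustrative)::
+
+            import socketio
+
+            sio = socketio.AsyncServer()
+            sio.instrument(auth={'username': 'admin', 'password': 'secret'})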
+ """
+ if auth is None:
+ raise ValueError('auth must be specified')
+ self.sio = sio
+ self.auth = auth
+ self.admin_namespace = namespace
+ self.read_only = read_only
+ self.server_id = server_id or (
+ self.sio.manager.host_id if hasattr(self.sio.manager, 'host_id')
+ else HOSTNAME
+ )
+ self.mode = mode
+ self.server_stats_interval = server_stats_interval
+ self.admin_queue = []
+ self.event_buffer = EventBuffer()
+
+        # task that emits "server_stats" every server_stats_interval seconds
+        # (2 by default)
+ self.stop_stats_event = None
+ self.stats_task = None
+
+ # monkey-patch the server to report metrics to the admin UI
+ self.instrument()
+
+ def instrument(self):
+ self.sio.on('connect', self.admin_connect,
+ namespace=self.admin_namespace)
+
+ if self.mode == 'development':
+ if not self.read_only: # pragma: no branch
+ self.sio.on('emit', self.admin_emit,
+ namespace=self.admin_namespace)
+ self.sio.on('join', self.admin_enter_room,
+ namespace=self.admin_namespace)
+ self.sio.on('leave', self.admin_leave_room,
+ namespace=self.admin_namespace)
+ self.sio.on('_disconnect', self.admin_disconnect,
+ namespace=self.admin_namespace)
+
+ # track socket connection times
+ self.sio.manager._timestamps = {}
+
+ # report socket.io connections
+ self.sio.manager.__connect = self.sio.manager.connect
+ self.sio.manager.connect = self._connect
+
+ # report socket.io disconnection
+ self.sio.manager.__disconnect = self.sio.manager.disconnect
+ self.sio.manager.disconnect = self._disconnect
+
+ # report join rooms
+ self.sio.manager.__basic_enter_room = \
+ self.sio.manager.basic_enter_room
+ self.sio.manager.basic_enter_room = self._basic_enter_room
+
+ # report leave rooms
+ self.sio.manager.__basic_leave_room = \
+ self.sio.manager.basic_leave_room
+ self.sio.manager.basic_leave_room = self._basic_leave_room
+
+ # report emit events
+ self.sio.manager.__emit = self.sio.manager.emit
+ self.sio.manager.emit = self._emit
+
+ # report receive events
+ self.sio.__handle_event_internal = self.sio._handle_event_internal
+ self.sio._handle_event_internal = self._handle_event_internal
+
+ # report engine.io connections
+ self.sio.eio.on('connect', self._handle_eio_connect)
+ self.sio.eio.on('disconnect', self._handle_eio_disconnect)
+
+ # report polling packets
+ from engineio.async_socket import AsyncSocket
+ self.sio.eio.__ok = self.sio.eio._ok
+ self.sio.eio._ok = self._eio_http_response
+ AsyncSocket.__handle_post_request = AsyncSocket.handle_post_request
+ AsyncSocket.handle_post_request = functools.partialmethod(
+ self.__class__._eio_handle_post_request, self)
+
+ # report websocket packets
+ AsyncSocket.__websocket_handler = AsyncSocket._websocket_handler
+ AsyncSocket._websocket_handler = functools.partialmethod(
+ self.__class__._eio_websocket_handler, self)
+
+ # report connected sockets with each ping
+ if self.mode == 'development':
+ AsyncSocket.__send_ping = AsyncSocket._send_ping
+ AsyncSocket._send_ping = functools.partialmethod(
+ self.__class__._eio_send_ping, self)
+
+ def uninstrument(self): # pragma: no cover
+ if self.mode == 'development':
+ self.sio.manager.connect = self.sio.manager.__connect
+ self.sio.manager.disconnect = self.sio.manager.__disconnect
+ self.sio.manager.basic_enter_room = \
+ self.sio.manager.__basic_enter_room
+ self.sio.manager.basic_leave_room = \
+ self.sio.manager.__basic_leave_room
+ self.sio.manager.emit = self.sio.manager.__emit
+ self.sio._handle_event_internal = self.sio.__handle_event_internal
+ self.sio.eio._ok = self.sio.eio.__ok
+
+ from engineio.async_socket import AsyncSocket
+ AsyncSocket.handle_post_request = AsyncSocket.__handle_post_request
+ AsyncSocket._websocket_handler = AsyncSocket.__websocket_handler
+ if self.mode == 'development':
+ AsyncSocket._send_ping = AsyncSocket.__send_ping
+
+ async def admin_connect(self, sid, environ, client_auth):
+ authenticated = True
+ if self.auth:
+ authenticated = False
+ if isinstance(self.auth, dict):
+ authenticated = client_auth == self.auth
+ elif isinstance(self.auth, list):
+ authenticated = client_auth in self.auth
+ else:
+ if asyncio.iscoroutinefunction(self.auth):
+ authenticated = await self.auth(client_auth)
+ else:
+ authenticated = self.auth(client_auth)
+ if not authenticated:
+ raise ConnectionRefusedError('authentication failed')
+
+ async def config(sid):
+ await self.sio.sleep(0.1)
+
+ # supported features
+ features = ['AGGREGATED_EVENTS']
+ if not self.read_only:
+ features += ['EMIT', 'JOIN', 'LEAVE', 'DISCONNECT', 'MJOIN',
+ 'MLEAVE', 'MDISCONNECT']
+ if self.mode == 'development':
+ features.append('ALL_EVENTS')
+ await self.sio.emit('config', {'supportedFeatures': features},
+ to=sid, namespace=self.admin_namespace)
+
+ # send current sockets
+ if self.mode == 'development':
+ all_sockets = []
+ for nsp in self.sio.manager.get_namespaces():
+ for sid, eio_sid in self.sio.manager.get_participants(
+ nsp, None):
+ all_sockets.append(
+ self.serialize_socket(sid, nsp, eio_sid))
+ await self.sio.emit('all_sockets', all_sockets, to=sid,
+ namespace=self.admin_namespace)
+
+ self.sio.start_background_task(config, sid)
+ self.stop_stats_event = self.sio.eio.create_event()
+ self.stats_task = self.sio.start_background_task(
+ self._emit_server_stats)
+
+ async def admin_emit(self, _, namespace, room_filter, event, *data):
+ await self.sio.emit(event, data, to=room_filter, namespace=namespace)
+
+ async def admin_enter_room(self, _, namespace, room, room_filter=None):
+ for sid, _ in self.sio.manager.get_participants(
+ namespace, room_filter):
+ await self.sio.enter_room(sid, room, namespace=namespace)
+
+ async def admin_leave_room(self, _, namespace, room, room_filter=None):
+ for sid, _ in self.sio.manager.get_participants(
+ namespace, room_filter):
+ await self.sio.leave_room(sid, room, namespace=namespace)
+
+ async def admin_disconnect(self, _, namespace, close, room_filter=None):
+ for sid, _ in self.sio.manager.get_participants(
+ namespace, room_filter):
+ await self.sio.disconnect(sid, namespace=namespace)
+
+ async def shutdown(self):
+ if self.stats_task: # pragma: no branch
+ self.stop_stats_event.set()
+ await asyncio.gather(self.stats_task)
+
+ async def _connect(self, eio_sid, namespace):
+ sid = await self.sio.manager.__connect(eio_sid, namespace)
+ t = time.time()
+ self.sio.manager._timestamps[sid] = t
+ serialized_socket = self.serialize_socket(sid, namespace, eio_sid)
+ await self.sio.emit('socket_connected', (
+ serialized_socket,
+ datetime.utcfromtimestamp(t).isoformat() + 'Z',
+ ), namespace=self.admin_namespace)
+ return sid
+
+ async def _disconnect(self, sid, namespace, **kwargs):
+ del self.sio.manager._timestamps[sid]
+ await self.sio.emit('socket_disconnected', (
+ namespace,
+ sid,
+ 'N/A',
+ datetime.utcnow().isoformat() + 'Z',
+ ), namespace=self.admin_namespace)
+ return await self.sio.manager.__disconnect(sid, namespace, **kwargs)
+
+ async def _check_for_upgrade(self, eio_sid, sid,
+ namespace): # pragma: no cover
+ for _ in range(5):
+ await self.sio.sleep(5)
+ try:
+ if self.sio.eio._get_socket(eio_sid).upgraded:
+ await self.sio.emit('socket_updated', {
+ 'id': sid,
+ 'nsp': namespace,
+ 'transport': 'websocket',
+ }, namespace=self.admin_namespace)
+ break
+ except KeyError:
+ pass
+
+ def _basic_enter_room(self, sid, namespace, room, eio_sid=None):
+ ret = self.sio.manager.__basic_enter_room(sid, namespace, room,
+ eio_sid)
+ if room:
+ self.admin_queue.append(('room_joined', (
+ namespace,
+ room,
+ sid,
+ datetime.utcnow().isoformat() + 'Z',
+ )))
+ return ret
+
+ def _basic_leave_room(self, sid, namespace, room):
+ if room:
+ self.admin_queue.append(('room_left', (
+ namespace,
+ room,
+ sid,
+ datetime.utcnow().isoformat() + 'Z',
+ )))
+ return self.sio.manager.__basic_leave_room(sid, namespace, room)
+
+ async def _emit(self, event, data, namespace, room=None, skip_sid=None,
+ callback=None, **kwargs):
+ ret = await self.sio.manager.__emit(
+ event, data, namespace, room=room, skip_sid=skip_sid,
+ callback=callback, **kwargs)
+ if namespace != self.admin_namespace:
+ event_data = [event] + list(data) if isinstance(data, tuple) \
+ else [data]
+ if not isinstance(skip_sid, list): # pragma: no branch
+ skip_sid = [skip_sid]
+ for sid, _ in self.sio.manager.get_participants(namespace, room):
+ if sid not in skip_sid:
+ await self.sio.emit('event_sent', (
+ namespace,
+ sid,
+ event_data,
+ datetime.utcnow().isoformat() + 'Z',
+ ), namespace=self.admin_namespace)
+ return ret
+
+ async def _handle_event_internal(self, server, sid, eio_sid, data,
+ namespace, id):
+ ret = await self.sio.__handle_event_internal(server, sid, eio_sid,
+ data, namespace, id)
+ await self.sio.emit('event_received', (
+ namespace,
+ sid,
+ data,
+ datetime.utcnow().isoformat() + 'Z',
+ ), namespace=self.admin_namespace)
+ return ret
+
+ async def _handle_eio_connect(self, eio_sid, environ):
+ if self.stop_stats_event is None:
+ self.stop_stats_event = self.sio.eio.create_event()
+ self.stats_task = self.sio.start_background_task(
+ self._emit_server_stats)
+
+ self.event_buffer.push('rawConnection')
+ return await self.sio._handle_eio_connect(eio_sid, environ)
+
+ async def _handle_eio_disconnect(self, eio_sid):
+ self.event_buffer.push('rawDisconnection')
+ return await self.sio._handle_eio_disconnect(eio_sid)
+
+ def _eio_http_response(self, packets=None, headers=None, jsonp_index=None):
+ ret = self.sio.eio.__ok(packets=packets, headers=headers,
+ jsonp_index=jsonp_index)
+ self.event_buffer.push('packetsOut')
+ self.event_buffer.push('bytesOut', len(ret['response']))
+ return ret
+
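+    # Note: as in the synchronous version, these methods are attached to the
+    # engine.io AsyncSocket class with functools.partialmethod, so ``socket``
+    # is the AsyncSocket instance and ``self`` is this instrumented server.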
+ async def _eio_handle_post_request(socket, self, environ):
+ ret = await socket.__handle_post_request(environ)
+ self.event_buffer.push('packetsIn')
+ self.event_buffer.push(
+ 'bytesIn', int(environ.get('CONTENT_LENGTH', 0)))
+ return ret
+
+ async def _eio_websocket_handler(socket, self, ws):
+ async def _send(ws, data):
+ self.event_buffer.push('packetsOut')
+ self.event_buffer.push('bytesOut', len(data))
+ return await ws.__send(data)
+
+ async def _wait(ws):
+ ret = await ws.__wait()
+ self.event_buffer.push('packetsIn')
+ self.event_buffer.push('bytesIn', len(ret or ''))
+ return ret
+
+ ws.__send = ws.send
+ ws.send = functools.partial(_send, ws)
+ ws.__wait = ws.wait
+ ws.wait = functools.partial(_wait, ws)
+ return await socket.__websocket_handler(ws)
+
+ async def _eio_send_ping(socket, self): # pragma: no cover
+ eio_sid = socket.sid
+ t = time.time()
+ for namespace in self.sio.manager.get_namespaces():
+ sid = self.sio.manager.sid_from_eio_sid(eio_sid, namespace)
+ if sid:
+ serialized_socket = self.serialize_socket(sid, namespace,
+ eio_sid)
+ await self.sio.emit('socket_connected', (
+ serialized_socket,
+ datetime.utcfromtimestamp(t).isoformat() + 'Z',
+ ), namespace=self.admin_namespace)
+ return await socket.__send_ping()
+
+ async def _emit_server_stats(self):
+ start_time = time.time()
+ namespaces = list(self.sio.handlers.keys())
+ namespaces.sort()
+ while not self.stop_stats_event.is_set():
+ await self.sio.sleep(self.server_stats_interval)
+ await self.sio.emit('server_stats', {
+ 'serverId': self.server_id,
+ 'hostname': HOSTNAME,
+ 'pid': PID,
+ 'uptime': time.time() - start_time,
+ 'clientsCount': len(self.sio.eio.sockets),
+ 'pollingClientsCount': len(
+ [s for s in self.sio.eio.sockets.values()
+ if not s.upgraded]),
+ 'aggregatedEvents': self.event_buffer.get_and_clear(),
+ 'namespaces': [{
+ 'name': nsp,
+ 'socketsCount': len(self.sio.manager.rooms.get(
+ nsp, {None: []}).get(None, []))
+ } for nsp in namespaces],
+ }, namespace=self.admin_namespace)
+ while self.admin_queue:
+ event, args = self.admin_queue.pop(0)
+ await self.sio.emit(event, args,
+ namespace=self.admin_namespace)
+
+ def serialize_socket(self, sid, namespace, eio_sid=None):
+ if eio_sid is None: # pragma: no cover
+ eio_sid = self.sio.manager.eio_sid_from_sid(sid)
+ socket = self.sio.eio._get_socket(eio_sid)
+ environ = self.sio.environ.get(eio_sid, {})
+        tm = self.sio.manager._timestamps.get(sid, 0)
+ return {
+ 'id': sid,
+ 'clientId': eio_sid,
+ 'transport': 'websocket' if socket.upgraded else 'polling',
+ 'nsp': namespace,
+ 'data': {},
+ 'handshake': {
+ 'address': environ.get('REMOTE_ADDR', ''),
+ 'headers': {k[5:].lower(): v for k, v in environ.items()
+ if k.startswith('HTTP_')},
+ 'query': {k: v[0] if len(v) == 1 else v for k, v in parse_qs(
+ environ.get('QUERY_STRING', '')).items()},
+ 'secure': environ.get('wsgi.url_scheme', '') == 'https',
+ 'url': environ.get('PATH_INFO', ''),
+ 'issued': tm * 1000,
+ 'time': datetime.utcfromtimestamp(tm).isoformat() + 'Z'
+ if tm else '',
+ },
+ 'rooms': self.sio.manager.get_rooms(sid, namespace),
+ }
diff --git a/.venv/Lib/site-packages/socketio/async_aiopika_manager.py b/.venv/Lib/site-packages/socketio/async_aiopika_manager.py
new file mode 100644
index 0000000..b6f09b8
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/async_aiopika_manager.py
@@ -0,0 +1,126 @@
+import asyncio
+import pickle
+
+from .async_pubsub_manager import AsyncPubSubManager
+
+try:
+ import aio_pika
+except ImportError:
+ aio_pika = None
+
+
+class AsyncAioPikaManager(AsyncPubSubManager): # pragma: no cover
+ """Client manager that uses aio_pika for inter-process messaging under
+ asyncio.
+
+ This class implements a client manager backend for event sharing across
+    multiple processes, using RabbitMQ.
+
+    To use an aio_pika backend, initialize the :class:`AsyncServer` instance
+    as follows::
+
+        url = 'amqp://user:password@hostname:port//'
+        server = socketio.AsyncServer(
+            client_manager=socketio.AsyncAioPikaManager(url))
+
+ :param url: The connection URL for the backend messaging queue. Example
+ connection URLs are ``'amqp://guest:guest@localhost:5672//'``
+ for RabbitMQ.
+ :param channel: The channel name on which the server sends and receives
+ notifications. Must be the same in all the servers.
+ With this manager, the channel name is the exchange name
+                    in RabbitMQ.
+ :param write_only: If set to ``True``, only initialize to emit events. The
+ default of ``False`` initializes the class for emitting
+ and receiving.
+ """
+
+ name = 'asyncaiopika'
+
+ def __init__(self, url='amqp://guest:guest@localhost:5672//',
+ channel='socketio', write_only=False, logger=None):
+ if aio_pika is None:
+ raise RuntimeError('aio_pika package is not installed '
+ '(Run "pip install aio_pika" in your '
+ 'virtualenv).')
+ self.url = url
+ self._lock = asyncio.Lock()
+ self.publisher_connection = None
+ self.publisher_channel = None
+ self.publisher_exchange = None
+ super().__init__(channel=channel, write_only=write_only, logger=logger)
+
+ async def _connection(self):
+ return await aio_pika.connect_robust(self.url)
+
+ async def _channel(self, connection):
+ return await connection.channel()
+
+ async def _exchange(self, channel):
+ return await channel.declare_exchange(self.channel,
+ aio_pika.ExchangeType.FANOUT)
+
+ async def _queue(self, channel, exchange):
+ queue = await channel.declare_queue(durable=False,
+ arguments={'x-expires': 300000})
+ await queue.bind(exchange)
+ return queue
+
+ async def _publish(self, data):
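+        # lazily create the publisher connection on first use; the lock and
+        # the repeated check prevent concurrent first publishes from opening
+        # duplicate connections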
+ if self.publisher_connection is None:
+ async with self._lock:
+ if self.publisher_connection is None:
+ self.publisher_connection = await self._connection()
+ self.publisher_channel = await self._channel(
+ self.publisher_connection
+ )
+ self.publisher_exchange = await self._exchange(
+ self.publisher_channel
+ )
+ retry = True
+ while True:
+ try:
+ await self.publisher_exchange.publish(
+ aio_pika.Message(
+ body=pickle.dumps(data),
+ delivery_mode=aio_pika.DeliveryMode.PERSISTENT
+ ), routing_key='*',
+ )
+ break
+ except aio_pika.AMQPException:
+ if retry:
+ self._get_logger().error('Cannot publish to rabbitmq... '
+ 'retrying')
+ retry = False
+ else:
+ self._get_logger().error(
+ 'Cannot publish to rabbitmq... giving up')
+ break
+ except aio_pika.exceptions.ChannelInvalidStateError:
+ # aio_pika raises this exception when the task is cancelled
+ raise asyncio.CancelledError()
+
+ async def _listen(self):
+ async with (await self._connection()) as connection:
+ channel = await self._channel(connection)
+ await channel.set_qos(prefetch_count=1)
+ exchange = await self._exchange(channel)
+ queue = await self._queue(channel, exchange)
+
+ retry_sleep = 1
+ while True:
+ try:
+ async with queue.iterator() as queue_iter:
+ async for message in queue_iter:
+ async with message.process():
+ yield pickle.loads(message.body)
+ retry_sleep = 1
+ except aio_pika.AMQPException:
+ self._get_logger().error(
+ 'Cannot receive from rabbitmq... '
+ 'retrying in {} secs'.format(retry_sleep))
+ await asyncio.sleep(retry_sleep)
+ retry_sleep = min(retry_sleep * 2, 60)
+ except aio_pika.exceptions.ChannelInvalidStateError:
+ # aio_pika raises this exception when the task is cancelled
+ raise asyncio.CancelledError()
diff --git a/.venv/Lib/site-packages/socketio/async_client.py b/.venv/Lib/site-packages/socketio/async_client.py
new file mode 100644
index 0000000..5fd8daa
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/async_client.py
@@ -0,0 +1,586 @@
+import asyncio
+import logging
+import random
+
+import engineio
+
+from . import base_client
+from . import exceptions
+from . import packet
+
+default_logger = logging.getLogger('socketio.client')
+
+
+class AsyncClient(base_client.BaseClient):
+ """A Socket.IO client for asyncio.
+
+ This class implements a fully compliant Socket.IO web client with support
+ for websocket and long-polling transports.
+
+ :param reconnection: ``True`` if the client should automatically attempt to
+ reconnect to the server after an interruption, or
+ ``False`` to not reconnect. The default is ``True``.
+ :param reconnection_attempts: How many reconnection attempts to issue
+ before giving up, or 0 for infinite attempts.
+ The default is 0.
+ :param reconnection_delay: How long to wait in seconds before the first
+ reconnection attempt. Each successive attempt
+ doubles this delay.
+ :param reconnection_delay_max: The maximum delay between reconnection
+ attempts.
+ :param randomization_factor: Randomization amount for each delay between
+ reconnection attempts. The default is 0.5,
+ which means that each delay is randomly
+ adjusted by +/- 50%.
+ :param logger: To enable logging set to ``True`` or pass a logger object to
+ use. To disable logging set to ``False``. The default is
+ ``False``. Note that fatal errors are logged even when
+ ``logger`` is ``False``.
+ :param json: An alternative json module to use for encoding and decoding
+ packets. Custom json modules must have ``dumps`` and ``loads``
+ functions that are compatible with the standard library
+ versions.
+ :param handle_sigint: Set to ``True`` to automatically handle disconnection
+ when the process is interrupted, or to ``False`` to
+ leave interrupt handling to the calling application.
+ Interrupt handling can only be enabled when the
+ client instance is created in the main thread.
+
+ The Engine.IO configuration supports the following settings:
+
+ :param request_timeout: A timeout in seconds for requests. The default is
+ 5 seconds.
+ :param http_session: an initialized ``aiohttp.ClientSession`` object to be
+ used when sending requests to the server. Use it if
+ you need to add special client options such as proxy
+ servers, SSL certificates, custom CA bundle, etc.
+ :param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to
+ skip SSL certificate verification, allowing
+ connections to servers with self signed certificates.
+ The default is ``True``.
+    :param websocket_extra_options: Dictionary containing additional keyword
+                                    arguments passed to
+                                    ``aiohttp.ClientSession.ws_connect()``.
+ :param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
+ a logger object to use. To disable logging set to
+ ``False``. The default is ``False``. Note that
+ fatal errors are logged even when
+ ``engineio_logger`` is ``False``.
+ """
+ def is_asyncio_based(self):
+ return True
+
+ async def connect(self, url, headers={}, auth=None, transports=None,
+ namespaces=None, socketio_path='socket.io', wait=True,
+ wait_timeout=1, retry=False):
+ """Connect to a Socket.IO server.
+
+ :param url: The URL of the Socket.IO server. It can include custom
+ query string parameters if required by the server. If a
+ function is provided, the client will invoke it to obtain
+ the URL each time a connection or reconnection is
+ attempted.
+ :param headers: A dictionary with custom headers to send with the
+ connection request. If a function is provided, the
+ client will invoke it to obtain the headers dictionary
+ each time a connection or reconnection is attempted.
+ :param auth: Authentication data passed to the server with the
+ connection request, normally a dictionary with one or
+ more string key/value pairs. If a function is provided,
+ the client will invoke it to obtain the authentication
+ data each time a connection or reconnection is attempted.
+ :param transports: The list of allowed transports. Valid transports
+ are ``'polling'`` and ``'websocket'``. If not
+ given, the polling transport is connected first,
+ then an upgrade to websocket is attempted.
+ :param namespaces: The namespaces to connect as a string or list of
+ strings. If not given, the namespaces that have
+ registered event handlers are connected.
+ :param socketio_path: The endpoint where the Socket.IO server is
+ installed. The default value is appropriate for
+ most cases.
+ :param wait: if set to ``True`` (the default) the call only returns
+ when all the namespaces are connected. If set to
+ ``False``, the call returns as soon as the Engine.IO
+ transport is connected, and the namespaces will connect
+ in the background.
+ :param wait_timeout: How long the client should wait for the
+ connection. The default is 1 second. This
+ argument is only considered when ``wait`` is set
+ to ``True``.
+ :param retry: Apply the reconnection logic if the initial connection
+ attempt fails. The default is ``False``.
+
+ Note: this method is a coroutine.
+
+ Example usage::
+
+            sio = socketio.AsyncClient()
+            await sio.connect('http://localhost:5000')
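+
+        A sketch of passing a callable for values that must be recomputed
+        before each connection or reconnection attempt (``get_token`` is a
+        hypothetical helper)::
+
+            sio = socketio.AsyncClient()
+            await sio.connect('http://localhost:5000',
+                              headers=lambda: {'Authorization': get_token()})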
+ """
+ if self.connected:
+ raise exceptions.ConnectionError('Already connected')
+
+ self.connection_url = url
+ self.connection_headers = headers
+ self.connection_auth = auth
+ self.connection_transports = transports
+ self.connection_namespaces = namespaces
+ self.socketio_path = socketio_path
+
+ if namespaces is None:
+ namespaces = list(set(self.handlers.keys()).union(
+ set(self.namespace_handlers.keys())))
+ if '*' in namespaces:
+ namespaces.remove('*')
+ if len(namespaces) == 0:
+ namespaces = ['/']
+ elif isinstance(namespaces, str):
+ namespaces = [namespaces]
+ self.connection_namespaces = namespaces
+ self.namespaces = {}
+ if self._connect_event is None:
+ self._connect_event = self.eio.create_event()
+ else:
+ self._connect_event.clear()
+ real_url = await self._get_real_value(self.connection_url)
+ real_headers = await self._get_real_value(self.connection_headers)
+ try:
+ await self.eio.connect(real_url, headers=real_headers,
+ transports=transports,
+ engineio_path=socketio_path)
+ except engineio.exceptions.ConnectionError as exc:
+ for n in self.connection_namespaces:
+ await self._trigger_event(
+ 'connect_error', n,
+ exc.args[1] if len(exc.args) > 1 else exc.args[0])
+ if retry: # pragma: no cover
+ await self._handle_reconnect()
+ if self.eio.state == 'connected':
+ return
+ raise exceptions.ConnectionError(exc.args[0]) from None
+
+ if wait:
+ try:
+ while True:
+ await asyncio.wait_for(self._connect_event.wait(),
+ wait_timeout)
+ self._connect_event.clear()
+ if set(self.namespaces) == set(self.connection_namespaces):
+ break
+ except asyncio.TimeoutError:
+ pass
+ if set(self.namespaces) != set(self.connection_namespaces):
+ await self.disconnect()
+ raise exceptions.ConnectionError(
+ 'One or more namespaces failed to connect')
+
+ self.connected = True
+
+ async def wait(self):
+ """Wait until the connection with the server ends.
+
+ Client applications can use this function to block the main thread
+ during the life of the connection.
+
+ Note: this method is a coroutine.
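+
+        A typical pattern (the URL is illustrative)::
+
+            await sio.connect('http://localhost:5000')
+            await sio.wait()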
+ """
+ while True:
+ await self.eio.wait()
+ await self.sleep(1) # give the reconnect task time to start up
+ if not self._reconnect_task:
+ break
+ await self._reconnect_task
+ if self.eio.state != 'connected':
+ break
+
+ async def emit(self, event, data=None, namespace=None, callback=None):
+ """Emit a custom event to the server.
+
+ :param event: The event name. It can be any string. The event names
+ ``'connect'``, ``'message'`` and ``'disconnect'`` are
+ reserved and should not be used.
+ :param data: The data to send to the server. Data can be of
+ type ``str``, ``bytes``, ``list`` or ``dict``. To send
+ multiple arguments, use a tuple where each element is of
+ one of the types indicated above.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the event is emitted to the
+ default namespace.
+ :param callback: If given, this function will be called to acknowledge
+ the server has received the message. The arguments
+ that will be passed to the function are those provided
+ by the server.
+
+ Note: this method is not designed to be used concurrently. If multiple
+ tasks are emitting at the same time on the same client connection, then
+ messages composed of multiple packets may end up being sent in an
+ incorrect sequence. Use standard concurrency solutions (such as a Lock
+ object) to prevent this situation.
+
+ Note 2: this method is a coroutine.
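+
+        A minimal sketch, assuming ``sio`` is a connected client (event
+        names and payloads are illustrative)::
+
+            # a single argument is delivered as-is
+            await sio.emit('status', {'progress': 50})
+            # a tuple is expanded into multiple handler arguments
+            await sio.emit('move', (10, 20), namespace='/game')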
+ """
+ namespace = namespace or '/'
+ if namespace not in self.namespaces:
+ raise exceptions.BadNamespaceError(
+ namespace + ' is not a connected namespace.')
+ self.logger.info('Emitting event "%s" [%s]', event, namespace)
+ if callback is not None:
+ id = self._generate_ack_id(namespace, callback)
+ else:
+ id = None
+ # tuples are expanded to multiple arguments, everything else is sent
+ # as a single argument
+ if isinstance(data, tuple):
+ data = list(data)
+ elif data is not None:
+ data = [data]
+ else:
+ data = []
+ await self._send_packet(self.packet_class(
+ packet.EVENT, namespace=namespace, data=[event] + data, id=id))
+
+ async def send(self, data, namespace=None, callback=None):
+ """Send a message to the server.
+
+ This function emits an event with the name ``'message'``. Use
+ :func:`emit` to issue custom event names.
+
+ :param data: The data to send to the server. Data can be of
+ type ``str``, ``bytes``, ``list`` or ``dict``. To send
+ multiple arguments, use a tuple where each element is of
+ one of the types indicated above.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the event is emitted to the
+ default namespace.
+ :param callback: If given, this function will be called to acknowledge
+ the server has received the message. The arguments
+ that will be passed to the function are those provided
+ by the server.
+
+ Note: this method is a coroutine.
+ """
+ await self.emit('message', data=data, namespace=namespace,
+ callback=callback)
+
+ async def call(self, event, data=None, namespace=None, timeout=60):
+ """Emit a custom event to the server and wait for the response.
+
+ This method issues an emit with a callback and waits for the callback
+ to be invoked before returning. If the callback isn't invoked before
+ the timeout, then a ``TimeoutError`` exception is raised. If the
+ Socket.IO connection drops during the wait, this method still waits
+ until the specified timeout.
+
+ :param event: The event name. It can be any string. The event names
+ ``'connect'``, ``'message'`` and ``'disconnect'`` are
+ reserved and should not be used.
+ :param data: The data to send to the server. Data can be of
+ type ``str``, ``bytes``, ``list`` or ``dict``. To send
+ multiple arguments, use a tuple where each element is of
+ one of the types indicated above.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the event is emitted to the
+ default namespace.
+ :param timeout: The waiting timeout. If the timeout is reached before
+ the server acknowledges the event, then a
+ ``TimeoutError`` exception is raised.
+
+ Note: this method is not designed to be used concurrently. If multiple
+ tasks are emitting at the same time on the same client connection, then
+ messages composed of multiple packets may end up being sent in an
+ incorrect sequence. Use standard concurrency solutions (such as a Lock
+ object) to prevent this situation.
+
+ Note 2: this method is a coroutine.
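+
+        A usage sketch (the event name and timeout are illustrative)::
+
+            try:
+                result = await sio.call('get_status', timeout=10)
+            except socketio.exceptions.TimeoutError:
+                result = None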
+ """
+ callback_event = self.eio.create_event()
+ callback_args = []
+
+ def event_callback(*args):
+ callback_args.append(args)
+ callback_event.set()
+
+ await self.emit(event, data=data, namespace=namespace,
+ callback=event_callback)
+ try:
+ await asyncio.wait_for(callback_event.wait(), timeout)
+ except asyncio.TimeoutError:
+ raise exceptions.TimeoutError() from None
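+        # unwrap the acknowledgement arguments: multiple arguments are
+        # returned as a tuple, a single argument is returned bare, and no
+        # arguments are returned as None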
+ return callback_args[0] if len(callback_args[0]) > 1 \
+ else callback_args[0][0] if len(callback_args[0]) == 1 \
+ else None
+
+ async def disconnect(self):
+ """Disconnect from the server.
+
+ Note: this method is a coroutine.
+ """
+ # here we just request the disconnection
+ # later in _handle_eio_disconnect we invoke the disconnect handler
+ for n in self.namespaces:
+ await self._send_packet(self.packet_class(packet.DISCONNECT,
+ namespace=n))
+ await self.eio.disconnect(abort=True)
+
+ async def shutdown(self):
+ """Stop the client.
+
+ If the client is connected to a server, it is disconnected. If the
+        client is attempting to reconnect to a server, the reconnection
+        attempts
+ are stopped. If the client is not connected to a server and is not
+ attempting to reconnect, then this function does nothing.
+ """
+ if self.connected:
+ await self.disconnect()
+ elif self._reconnect_task: # pragma: no branch
+ self._reconnect_abort.set()
+ await self._reconnect_task
+
+ def start_background_task(self, target, *args, **kwargs):
+ """Start a background task using the appropriate async model.
+
+ This is a utility function that applications can use to start a
+ background task using the method that is compatible with the
+ selected async mode.
+
+ :param target: the target function to execute.
+ :param args: arguments to pass to the function.
+ :param kwargs: keyword arguments to pass to the function.
+
+        The return value is an ``asyncio.Task`` object.
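+
+        A usage sketch (``my_task`` is a hypothetical coroutine)::
+
+            task = sio.start_background_task(my_task, 42)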
+ """
+ return self.eio.start_background_task(target, *args, **kwargs)
+
+ async def sleep(self, seconds=0):
+ """Sleep for the requested amount of time using the appropriate async
+ model.
+
+ This is a utility function that applications can use to put a task to
+ sleep without having to worry about using the correct call for the
+ selected async mode.
+
+ Note: this method is a coroutine.
+ """
+ return await self.eio.sleep(seconds)
+
+ async def _get_real_value(self, value):
+ """Return the actual value, for parameters that can also be given as
+ callables."""
+ if not callable(value):
+ return value
+ if asyncio.iscoroutinefunction(value):
+ return await value()
+ return value()
+
+ async def _send_packet(self, pkt):
+ """Send a Socket.IO packet to the server."""
+ encoded_packet = pkt.encode()
+ if isinstance(encoded_packet, list):
+ for ep in encoded_packet:
+ await self.eio.send(ep)
+ else:
+ await self.eio.send(encoded_packet)
+
+ async def _handle_connect(self, namespace, data):
+ namespace = namespace or '/'
+ if namespace not in self.namespaces:
+ self.logger.info('Namespace {} is connected'.format(namespace))
+ self.namespaces[namespace] = (data or {}).get('sid', self.sid)
+ await self._trigger_event('connect', namespace=namespace)
+ self._connect_event.set()
+
+ async def _handle_disconnect(self, namespace):
+ if not self.connected:
+ return
+ namespace = namespace or '/'
+ await self._trigger_event('disconnect', namespace=namespace)
+ await self._trigger_event('__disconnect_final', namespace=namespace)
+ if namespace in self.namespaces:
+ del self.namespaces[namespace]
+ if not self.namespaces:
+ self.connected = False
+ await self.eio.disconnect(abort=True)
+
+ async def _handle_event(self, namespace, id, data):
+ namespace = namespace or '/'
+ self.logger.info('Received event "%s" [%s]', data[0], namespace)
+ r = await self._trigger_event(data[0], namespace, *data[1:])
+ if id is not None:
+ # send ACK packet with the response returned by the handler
+ # tuples are expanded as multiple arguments
+ if r is None:
+ data = []
+ elif isinstance(r, tuple):
+ data = list(r)
+ else:
+ data = [r]
+ await self._send_packet(self.packet_class(
+ packet.ACK, namespace=namespace, id=id, data=data))
+
+ async def _handle_ack(self, namespace, id, data):
+ namespace = namespace or '/'
+ self.logger.info('Received ack [%s]', namespace)
+ callback = None
+ try:
+ callback = self.callbacks[namespace][id]
+ except KeyError:
+ # if we get an unknown callback we just ignore it
+ self.logger.warning('Unknown callback received, ignoring.')
+ else:
+ del self.callbacks[namespace][id]
+ if callback is not None:
+ if asyncio.iscoroutinefunction(callback):
+ await callback(*data)
+ else:
+ callback(*data)
+
+ async def _handle_error(self, namespace, data):
+ namespace = namespace or '/'
+ self.logger.info('Connection to namespace {} was rejected'.format(
+ namespace))
+ if data is None:
+ data = tuple()
+ elif not isinstance(data, (tuple, list)):
+ data = (data,)
+ await self._trigger_event('connect_error', namespace, *data)
+ self._connect_event.set()
+ if namespace in self.namespaces:
+ del self.namespaces[namespace]
+ if namespace == '/':
+ self.namespaces = {}
+ self.connected = False
+
+ async def _trigger_event(self, event, namespace, *args):
+ """Invoke an application event handler."""
+ # first see if we have an explicit handler for the event
+ handler, args = self._get_event_handler(event, namespace, args)
+ if handler:
+ if asyncio.iscoroutinefunction(handler):
+ try:
+ ret = await handler(*args)
+ except asyncio.CancelledError: # pragma: no cover
+ ret = None
+ else:
+ ret = handler(*args)
+ return ret
+
+        # or else, forward the event to a namespace handler if one exists
+ handler, args = self._get_namespace_handler(namespace, args)
+ if handler:
+ return await handler.trigger_event(event, *args)
+
+ async def _handle_reconnect(self):
+ if self._reconnect_abort is None: # pragma: no cover
+ self._reconnect_abort = self.eio.create_event()
+ self._reconnect_abort.clear()
+ base_client.reconnecting_clients.append(self)
+ attempt_count = 0
+ current_delay = self.reconnection_delay
+ while True:
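+            # the delay doubles on every attempt, is capped at
+            # reconnection_delay_max, and receives +/- randomization_factor
+            # of random jitter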
+ delay = current_delay
+ current_delay *= 2
+ if delay > self.reconnection_delay_max:
+ delay = self.reconnection_delay_max
+ delay += self.randomization_factor * (2 * random.random() - 1)
+ self.logger.info(
+ 'Connection failed, new attempt in {:.02f} seconds'.format(
+ delay))
+ abort = False
+ try:
+ await asyncio.wait_for(self._reconnect_abort.wait(), delay)
+ abort = True
+ except asyncio.TimeoutError:
+ pass
+ except asyncio.CancelledError: # pragma: no cover
+ abort = True
+ if abort:
+ self.logger.info('Reconnect task aborted')
+ for n in self.connection_namespaces:
+ await self._trigger_event('__disconnect_final',
+ namespace=n)
+ break
+ attempt_count += 1
+ try:
+ await self.connect(self.connection_url,
+ headers=self.connection_headers,
+ auth=self.connection_auth,
+ transports=self.connection_transports,
+ namespaces=self.connection_namespaces,
+ socketio_path=self.socketio_path,
+ retry=False)
+ except (exceptions.ConnectionError, ValueError):
+ pass
+ else:
+ self.logger.info('Reconnection successful')
+ self._reconnect_task = None
+ break
+ if self.reconnection_attempts and \
+ attempt_count >= self.reconnection_attempts:
+ self.logger.info(
+ 'Maximum reconnection attempts reached, giving up')
+ for n in self.connection_namespaces:
+ await self._trigger_event('__disconnect_final',
+ namespace=n)
+ break
+ base_client.reconnecting_clients.remove(self)
+
+ async def _handle_eio_connect(self):
+ """Handle the Engine.IO connection event."""
+ self.logger.info('Engine.IO connection established')
+ self.sid = self.eio.sid
+ real_auth = await self._get_real_value(self.connection_auth) or {}
+ for n in self.connection_namespaces:
+ await self._send_packet(self.packet_class(
+ packet.CONNECT, data=real_auth, namespace=n))
+
+ async def _handle_eio_message(self, data):
+ """Dispatch Engine.IO messages."""
+ if self._binary_packet:
+ pkt = self._binary_packet
+ if pkt.add_attachment(data):
+ self._binary_packet = None
+ if pkt.packet_type == packet.BINARY_EVENT:
+ await self._handle_event(pkt.namespace, pkt.id, pkt.data)
+ else:
+ await self._handle_ack(pkt.namespace, pkt.id, pkt.data)
+ else:
+ pkt = self.packet_class(encoded_packet=data)
+ if pkt.packet_type == packet.CONNECT:
+ await self._handle_connect(pkt.namespace, pkt.data)
+ elif pkt.packet_type == packet.DISCONNECT:
+ await self._handle_disconnect(pkt.namespace)
+ elif pkt.packet_type == packet.EVENT:
+ await self._handle_event(pkt.namespace, pkt.id, pkt.data)
+ elif pkt.packet_type == packet.ACK:
+ await self._handle_ack(pkt.namespace, pkt.id, pkt.data)
+ elif pkt.packet_type == packet.BINARY_EVENT or \
+ pkt.packet_type == packet.BINARY_ACK:
+ self._binary_packet = pkt
+ elif pkt.packet_type == packet.CONNECT_ERROR:
+ await self._handle_error(pkt.namespace, pkt.data)
+ else:
+ raise ValueError('Unknown packet type.')
+
+ async def _handle_eio_disconnect(self):
+ """Handle the Engine.IO disconnection event."""
+ self.logger.info('Engine.IO connection dropped')
+ will_reconnect = self.reconnection and self.eio.state == 'connected'
+ if self.connected:
+ for n in self.namespaces:
+ await self._trigger_event('disconnect', namespace=n)
+ if not will_reconnect:
+ await self._trigger_event('__disconnect_final',
+ namespace=n)
+ self.namespaces = {}
+ self.connected = False
+ self.callbacks = {}
+ self._binary_packet = None
+ self.sid = None
+ if will_reconnect:
+ self._reconnect_task = self.start_background_task(
+ self._handle_reconnect)
+
+ def _engineio_client_class(self):
+ return engineio.AsyncClient
diff --git a/.venv/Lib/site-packages/socketio/async_manager.py b/.venv/Lib/site-packages/socketio/async_manager.py
new file mode 100644
index 0000000..dcf79cf
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/async_manager.py
@@ -0,0 +1,119 @@
+import asyncio
+
+from engineio import packet as eio_packet
+from socketio import packet
+from .base_manager import BaseManager
+
+
+class AsyncManager(BaseManager):
+ """Manage a client list for an asyncio server."""
+ async def can_disconnect(self, sid, namespace):
+ return self.is_connected(sid, namespace)
+
+ async def emit(self, event, data, namespace, room=None, skip_sid=None,
+ callback=None, **kwargs):
+ """Emit a message to a single client, a room, or all the clients
+ connected to the namespace.
+
+ Note: this method is a coroutine.
+ """
+ if namespace not in self.rooms:
+ return
+ if isinstance(data, tuple):
+ # tuples are expanded to multiple arguments, everything else is
+ # sent as a single argument
+ data = list(data)
+ elif data is not None:
+ data = [data]
+ else:
+ data = []
+ if not isinstance(skip_sid, list):
+ skip_sid = [skip_sid]
+ tasks = []
+ if not callback:
+ # when callbacks aren't used the packets sent to each recipient are
+ # identical, so they can be generated once and reused
+ pkt = self.server.packet_class(
+ packet.EVENT, namespace=namespace, data=[event] + data)
+ encoded_packet = pkt.encode()
+ if not isinstance(encoded_packet, list):
+ encoded_packet = [encoded_packet]
+ eio_pkt = [eio_packet.Packet(eio_packet.MESSAGE, p)
+ for p in encoded_packet]
+ for sid, eio_sid in self.get_participants(namespace, room):
+ if sid not in skip_sid:
+ for p in eio_pkt:
+ tasks.append(asyncio.create_task(
+ self.server._send_eio_packet(eio_sid, p)))
+ else:
+ # callbacks are used, so each recipient must be sent a packet that
+ # contains a unique callback id
+ # note that callbacks when addressing a group of people are
+ # implemented but not tested or supported
+ for sid, eio_sid in self.get_participants(namespace, room):
+ if sid not in skip_sid: # pragma: no branch
+ id = self._generate_ack_id(sid, callback)
+ pkt = self.server.packet_class(
+ packet.EVENT, namespace=namespace, data=[event] + data,
+ id=id)
+ tasks.append(asyncio.create_task(
+ self.server._send_packet(eio_sid, pkt)))
+        if not tasks:  # pragma: no cover
+ return
+ await asyncio.wait(tasks)
+
+ async def connect(self, eio_sid, namespace):
+ """Register a client connection to a namespace.
+
+ Note: this method is a coroutine.
+ """
+ return super().connect(eio_sid, namespace)
+
+ async def disconnect(self, sid, namespace, **kwargs):
+ """Disconnect a client.
+
+ Note: this method is a coroutine.
+ """
+ return self.basic_disconnect(sid, namespace, **kwargs)
+
+ async def enter_room(self, sid, namespace, room, eio_sid=None):
+ """Add a client to a room.
+
+ Note: this method is a coroutine.
+ """
+ return self.basic_enter_room(sid, namespace, room, eio_sid=eio_sid)
+
+ async def leave_room(self, sid, namespace, room):
+ """Remove a client from a room.
+
+ Note: this method is a coroutine.
+ """
+ return self.basic_leave_room(sid, namespace, room)
+
+ async def close_room(self, room, namespace):
+ """Remove all participants from a room.
+
+ Note: this method is a coroutine.
+ """
+ return self.basic_close_room(room, namespace)
+
+ async def trigger_callback(self, sid, id, data):
+ """Invoke an application callback.
+
+ Note: this method is a coroutine.
+ """
+ callback = None
+ try:
+ callback = self.callbacks[sid][id]
+ except KeyError:
+ # if we get an unknown callback we just ignore it
+ self._get_logger().warning('Unknown callback received, ignoring.')
+ else:
+ del self.callbacks[sid][id]
+ if callback is not None:
+ ret = callback(*data)
+ if asyncio.iscoroutine(ret):
+ try:
+ await ret
+ except asyncio.CancelledError: # pragma: no cover
+ pass
diff --git a/.venv/Lib/site-packages/socketio/async_namespace.py b/.venv/Lib/site-packages/socketio/async_namespace.py
new file mode 100644
index 0000000..0a2e051
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/async_namespace.py
@@ -0,0 +1,255 @@
+import asyncio
+
+from socketio import base_namespace
+
+
+class AsyncNamespace(base_namespace.BaseServerNamespace):
+ """Base class for asyncio server-side class-based namespaces.
+
+ A class-based namespace is a class that contains all the event handlers
+ for a Socket.IO namespace. The event handlers are methods of the class
+ with the prefix ``on_``, such as ``on_connect``, ``on_disconnect``,
+ ``on_message``, ``on_json``, and so on. These can be regular functions or
+ coroutines.
+
+ :param namespace: The Socket.IO namespace to be used with all the event
+ handlers defined in this class. If this argument is
+ omitted, the default namespace is used.
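+
+    A minimal sketch, assuming ``sio`` is an :class:`AsyncServer` instance
+    (the namespace and event names are illustrative)::
+
+        class ChatNamespace(socketio.AsyncNamespace):
+            def on_connect(self, sid, environ):
+                pass
+
+            async def on_message(self, sid, data):
+                await self.emit('reply', data, to=sid)
+
+        sio.register_namespace(ChatNamespace('/chat'))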
+ """
+ def is_asyncio_based(self):
+ return True
+
+ async def trigger_event(self, event, *args):
+ """Dispatch an event to the proper handler method.
+
+ In the most common usage, this method is not overloaded by subclasses,
+ as it performs the routing of events to methods. However, this
+ method can be overridden if special dispatching rules are needed, or if
+ having a single method that catches all events is desired.
+
+ Note: this method is a coroutine.
+ """
+ handler_name = 'on_' + event
+ if hasattr(self, handler_name):
+ handler = getattr(self, handler_name)
+            if asyncio.iscoroutinefunction(handler):
+ try:
+ ret = await handler(*args)
+ except asyncio.CancelledError: # pragma: no cover
+ ret = None
+ else:
+ ret = handler(*args)
+ return ret
+
+ async def emit(self, event, data=None, to=None, room=None, skip_sid=None,
+ namespace=None, callback=None, ignore_queue=False):
+ """Emit a custom event to one or more connected clients.
+
+ The only difference with the :func:`socketio.Server.emit` method is
+ that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+
+ Note: this method is a coroutine.
+ """
+ return await self.server.emit(event, data=data, to=to, room=room,
+ skip_sid=skip_sid,
+ namespace=namespace or self.namespace,
+ callback=callback,
+ ignore_queue=ignore_queue)
+
+ async def send(self, data, to=None, room=None, skip_sid=None,
+ namespace=None, callback=None, ignore_queue=False):
+ """Send a message to one or more connected clients.
+
+ The only difference with the :func:`socketio.Server.send` method is
+ that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+
+ Note: this method is a coroutine.
+ """
+ return await self.server.send(data, to=to, room=room,
+ skip_sid=skip_sid,
+ namespace=namespace or self.namespace,
+ callback=callback,
+ ignore_queue=ignore_queue)
+
+ async def call(self, event, data=None, to=None, sid=None, namespace=None,
+ timeout=None, ignore_queue=False):
+ """Emit a custom event to a client and wait for the response.
+
+ The only difference with the :func:`socketio.Server.call` method is
+ that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+ """
+ return await self.server.call(event, data=data, to=to, sid=sid,
+ namespace=namespace or self.namespace,
+ timeout=timeout,
+ ignore_queue=ignore_queue)
+
+ async def enter_room(self, sid, room, namespace=None):
+ """Enter a room.
+
+ The only difference with the :func:`socketio.Server.enter_room` method
+ is that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+
+ Note: this method is a coroutine.
+ """
+ return await self.server.enter_room(
+ sid, room, namespace=namespace or self.namespace)
+
+ async def leave_room(self, sid, room, namespace=None):
+ """Leave a room.
+
+ The only difference with the :func:`socketio.Server.leave_room` method
+ is that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+
+ Note: this method is a coroutine.
+ """
+ return await self.server.leave_room(
+ sid, room, namespace=namespace or self.namespace)
+
+ async def close_room(self, room, namespace=None):
+ """Close a room.
+
+ The only difference with the :func:`socketio.Server.close_room` method
+ is that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+
+ Note: this method is a coroutine.
+ """
+ return await self.server.close_room(
+ room, namespace=namespace or self.namespace)
+
+ async def get_session(self, sid, namespace=None):
+ """Return the user session for a client.
+
+ The only difference with the :func:`socketio.Server.get_session`
+ method is that when the ``namespace`` argument is not given the
+ namespace associated with the class is used.
+
+ Note: this method is a coroutine.
+ """
+ return await self.server.get_session(
+ sid, namespace=namespace or self.namespace)
+
+ async def save_session(self, sid, session, namespace=None):
+ """Store the user session for a client.
+
+ The only difference with the :func:`socketio.Server.save_session`
+ method is that when the ``namespace`` argument is not given the
+ namespace associated with the class is used.
+
+ Note: this method is a coroutine.
+ """
+ return await self.server.save_session(
+ sid, session, namespace=namespace or self.namespace)
+
+ def session(self, sid, namespace=None):
+ """Return the user session for a client with context manager syntax.
+
+ The only difference with the :func:`socketio.Server.session` method is
+ that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+ """
+ return self.server.session(sid, namespace=namespace or self.namespace)
+
+ async def disconnect(self, sid, namespace=None):
+ """Disconnect a client.
+
+ The only difference with the :func:`socketio.Server.disconnect` method
+ is that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+
+ Note: this method is a coroutine.
+ """
+ return await self.server.disconnect(
+ sid, namespace=namespace or self.namespace)
+
+
+class AsyncClientNamespace(base_namespace.BaseClientNamespace):
+ """Base class for asyncio client-side class-based namespaces.
+
+ A class-based namespace is a class that contains all the event handlers
+ for a Socket.IO namespace. The event handlers are methods of the class
+ with the prefix ``on_``, such as ``on_connect``, ``on_disconnect``,
+ ``on_message``, ``on_json``, and so on. These can be regular functions or
+ coroutines.
+
+ :param namespace: The Socket.IO namespace to be used with all the event
+ handlers defined in this class. If this argument is
+ omitted, the default namespace is used.
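+
+    A minimal sketch, assuming ``sio`` is an :class:`AsyncClient` instance
+    (note that client-side handlers do not receive a ``sid`` argument)::
+
+        class ChatClientNamespace(socketio.AsyncClientNamespace):
+            def on_connect(self):
+                pass
+
+            async def on_message(self, data):
+                await self.send('ack')
+
+        sio.register_namespace(ChatClientNamespace('/chat'))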
+ """
+ def is_asyncio_based(self):
+ return True
+
+ async def trigger_event(self, event, *args):
+ """Dispatch an event to the proper handler method.
+
+ In the most common usage, this method is not overloaded by subclasses,
+ as it performs the routing of events to methods. However, this
+ method can be overridden if special dispatching rules are needed, or if
+ having a single method that catches all events is desired.
+
+ Note: this method is a coroutine.
+ """
+ handler_name = 'on_' + event
+ if hasattr(self, handler_name):
+ handler = getattr(self, handler_name)
+            if asyncio.iscoroutinefunction(handler):
+ try:
+ ret = await handler(*args)
+ except asyncio.CancelledError: # pragma: no cover
+ ret = None
+ else:
+ ret = handler(*args)
+ return ret
+
+ async def emit(self, event, data=None, namespace=None, callback=None):
+ """Emit a custom event to the server.
+
+ The only difference with the :func:`socketio.Client.emit` method is
+ that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+
+ Note: this method is a coroutine.
+ """
+ return await self.client.emit(event, data=data,
+ namespace=namespace or self.namespace,
+ callback=callback)
+
+ async def send(self, data, namespace=None, callback=None):
+ """Send a message to the server.
+
+ The only difference with the :func:`socketio.Client.send` method is
+ that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+
+ Note: this method is a coroutine.
+ """
+ return await self.client.send(data,
+ namespace=namespace or self.namespace,
+ callback=callback)
+
+ async def call(self, event, data=None, namespace=None, timeout=None):
+ """Emit a custom event to the server and wait for the response.
+
+ The only difference with the :func:`socketio.Client.call` method is
+ that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+ """
+ return await self.client.call(event, data=data,
+ namespace=namespace or self.namespace,
+ timeout=timeout)
+
+ async def disconnect(self):
+ """Disconnect a client.
+
+ The only difference with the :func:`socketio.Client.disconnect` method
+ is that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+
+ Note: this method is a coroutine.
+ """
+ return await self.client.disconnect()
diff --git a/.venv/Lib/site-packages/socketio/async_pubsub_manager.py b/.venv/Lib/site-packages/socketio/async_pubsub_manager.py
new file mode 100644
index 0000000..3e11f1e
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/async_pubsub_manager.py
@@ -0,0 +1,242 @@
+import asyncio
+from functools import partial
+import uuid
+
+from engineio import json
+import pickle
+
+from .async_manager import AsyncManager
+
+
+class AsyncPubSubManager(AsyncManager):
+ """Manage a client list attached to a pub/sub backend under asyncio.
+
+ This is a base class that enables multiple servers to share the list of
+ clients, with the servers communicating events through a pub/sub backend.
+ The use of a pub/sub backend also allows any client connected to the
+ backend to emit events addressed to Socket.IO clients.
+
+    The actual backends must be implemented by subclasses; this class only
+    provides a generic pub/sub framework for asyncio applications.
+
+ :param channel: The channel name on which the server sends and receives
+ notifications.
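+
+    A sketch of emitting to connected clients from an external process
+    through a write-only manager (the Redis subclass of this framework is
+    used for illustration)::
+
+        external = socketio.AsyncRedisManager('redis://', write_only=True)
+        await external.emit('notification', {'text': 'hello'},
+                            namespace='/')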
+ """
+ name = 'asyncpubsub'
+
+ def __init__(self, channel='socketio', write_only=False, logger=None):
+ super().__init__()
+ self.channel = channel
+ self.write_only = write_only
+ self.host_id = uuid.uuid4().hex
+ self.logger = logger
+
+ def initialize(self):
+ super().initialize()
+ if not self.write_only:
+ self.thread = self.server.start_background_task(self._thread)
+ self._get_logger().info(self.name + ' backend initialized.')
+
+ async def emit(self, event, data, namespace=None, room=None, skip_sid=None,
+ callback=None, **kwargs):
+ """Emit a message to a single client, a room, or all the clients
+ connected to the namespace.
+
+        This method takes care of propagating the message to all the servers
+ that are connected through the message queue.
+
+ The parameters are the same as in :meth:`.Server.emit`.
+
+ Note: this method is a coroutine.
+ """
+ if kwargs.get('ignore_queue'):
+ return await super().emit(
+ event, data, namespace=namespace, room=room, skip_sid=skip_sid,
+ callback=callback)
+ namespace = namespace or '/'
+ if callback is not None:
+ if self.server is None:
+ raise RuntimeError('Callbacks can only be issued from the '
+ 'context of a server.')
+ if room is None:
+ raise ValueError('Cannot use callback without a room set.')
+ id = self._generate_ack_id(room, callback)
+ callback = (room, namespace, id)
+ else:
+ callback = None
+ message = {'method': 'emit', 'event': event, 'data': data,
+ 'namespace': namespace, 'room': room,
+ 'skip_sid': skip_sid, 'callback': callback,
+ 'host_id': self.host_id}
+ await self._handle_emit(message) # handle in this host
+ await self._publish(message) # notify other hosts
+
+ async def can_disconnect(self, sid, namespace):
+ if self.is_connected(sid, namespace):
+ # client is in this server, so we can disconnect directly
+ return await super().can_disconnect(sid, namespace)
+ else:
+ # client is in another server, so we post request to the queue
+ await self._publish({'method': 'disconnect', 'sid': sid,
+ 'namespace': namespace or '/',
+ 'host_id': self.host_id})
+
+ async def disconnect(self, sid, namespace, **kwargs):
+ if kwargs.get('ignore_queue'):
+ return await super().disconnect(
+ sid, namespace=namespace)
+ message = {'method': 'disconnect', 'sid': sid,
+ 'namespace': namespace or '/', 'host_id': self.host_id}
+ await self._handle_disconnect(message) # handle in this host
+ await self._publish(message) # notify other hosts
+
+ async def enter_room(self, sid, namespace, room, eio_sid=None):
+ if self.is_connected(sid, namespace):
+            # client is in this server, so we can enter the room directly
+ return await super().enter_room(sid, namespace, room,
+ eio_sid=eio_sid)
+ else:
+ message = {'method': 'enter_room', 'sid': sid, 'room': room,
+ 'namespace': namespace or '/', 'host_id': self.host_id}
+ await self._publish(message) # notify other hosts
+
+ async def leave_room(self, sid, namespace, room):
+ if self.is_connected(sid, namespace):
+            # client is in this server, so we can leave the room directly
+ return await super().leave_room(sid, namespace, room)
+ else:
+ message = {'method': 'leave_room', 'sid': sid, 'room': room,
+ 'namespace': namespace or '/', 'host_id': self.host_id}
+ await self._publish(message) # notify other hosts
+
+ async def close_room(self, room, namespace=None):
+ message = {'method': 'close_room', 'room': room,
+ 'namespace': namespace or '/', 'host_id': self.host_id}
+ await self._handle_close_room(message) # handle in this host
+ await self._publish(message) # notify other hosts
+
+ async def _publish(self, data):
+ """Publish a message on the Socket.IO channel.
+
+ This method needs to be implemented by the different subclasses that
+ support pub/sub backends.
+ """
+ raise NotImplementedError('This method must be implemented in a '
+ 'subclass.') # pragma: no cover
+
+ async def _listen(self):
+ """Return the next message published on the Socket.IO channel,
+ blocking until a message is available.
+
+ This method needs to be implemented by the different subclasses that
+ support pub/sub backends.
+ """
+ raise NotImplementedError('This method must be implemented in a '
+ 'subclass.') # pragma: no cover
+
+ async def _handle_emit(self, message):
+        # Events with callbacks are very tricky to handle across hosts.
+        # Here in the receiving end we set up a local callback that preserves
+        # the callback host and id from the sender.
+ remote_callback = message.get('callback')
+ remote_host_id = message.get('host_id')
+ if remote_callback is not None and len(remote_callback) == 3:
+ callback = partial(self._return_callback, remote_host_id,
+ *remote_callback)
+ else:
+ callback = None
+ await super().emit(message['event'], message['data'],
+ namespace=message.get('namespace'),
+ room=message.get('room'),
+ skip_sid=message.get('skip_sid'),
+ callback=callback)
+
+ async def _handle_callback(self, message):
+ if self.host_id == message.get('host_id'):
+ try:
+ sid = message['sid']
+ id = message['id']
+ args = message['args']
+ except KeyError:
+ return
+ await self.trigger_callback(sid, id, args)
+
+ async def _return_callback(self, host_id, sid, namespace, callback_id,
+ *args):
+        # When an event callback is received, the callback is returned back
+        # to the sender, which is identified by the host_id
+ if host_id == self.host_id:
+ await self.trigger_callback(sid, callback_id, args)
+ else:
+ await self._publish({'method': 'callback', 'host_id': host_id,
+ 'sid': sid, 'namespace': namespace,
+ 'id': callback_id, 'args': args})
+
+ async def _handle_disconnect(self, message):
+ await self.server.disconnect(sid=message.get('sid'),
+ namespace=message.get('namespace'),
+ ignore_queue=True)
+
+ async def _handle_enter_room(self, message):
+ sid = message.get('sid')
+ namespace = message.get('namespace')
+ if self.is_connected(sid, namespace):
+ await super().enter_room(sid, namespace, message.get('room'))
+
+ async def _handle_leave_room(self, message):
+ sid = message.get('sid')
+ namespace = message.get('namespace')
+ if self.is_connected(sid, namespace):
+ await super().leave_room(sid, namespace, message.get('room'))
+
+ async def _handle_close_room(self, message):
+ await super().close_room(room=message.get('room'),
+ namespace=message.get('namespace'))
+
+ async def _thread(self):
+ while True:
+ try:
+ async for message in self._listen(): # pragma: no branch
+ data = None
+ if isinstance(message, dict):
+ data = message
+ else:
+ if isinstance(message, bytes): # pragma: no cover
+ try:
+ data = pickle.loads(message)
+                            except Exception:
+ pass
+ if data is None:
+ try:
+ data = json.loads(message)
+                            except Exception:
+ pass
+ if data and 'method' in data:
+ self._get_logger().debug('pubsub message: {}'.format(
+ data['method']))
+ try:
+ if data['method'] == 'callback':
+ await self._handle_callback(data)
+ elif data.get('host_id') != self.host_id:
+ if data['method'] == 'emit':
+ await self._handle_emit(data)
+ elif data['method'] == 'disconnect':
+ await self._handle_disconnect(data)
+ elif data['method'] == 'enter_room':
+ await self._handle_enter_room(data)
+ elif data['method'] == 'leave_room':
+ await self._handle_leave_room(data)
+ elif data['method'] == 'close_room':
+ await self._handle_close_room(data)
+ except asyncio.CancelledError:
+ raise # let the outer try/except handle it
+ except Exception:
+ self.server.logger.exception(
+ 'Handler error in pubsub listening thread')
+ self.server.logger.error('pubsub listen() exited unexpectedly')
+ break # loop should never exit except in unit tests!
+ except asyncio.CancelledError: # pragma: no cover
+ break
+ except Exception: # pragma: no cover
+ self.server.logger.exception('Unexpected Error in pubsub '
+ 'listening thread')
diff --git a/.venv/Lib/site-packages/socketio/async_redis_manager.py b/.venv/Lib/site-packages/socketio/async_redis_manager.py
new file mode 100644
index 0000000..e039c6e
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/async_redis_manager.py
@@ -0,0 +1,107 @@
+import asyncio
+import pickle
+
+try: # pragma: no cover
+ from redis import asyncio as aioredis
+ from redis.exceptions import RedisError
+except ImportError: # pragma: no cover
+ try:
+ import aioredis
+ from aioredis.exceptions import RedisError
+ except ImportError:
+ aioredis = None
+ RedisError = None
+
+from .async_pubsub_manager import AsyncPubSubManager
+
+
+class AsyncRedisManager(AsyncPubSubManager): # pragma: no cover
+ """Redis based client manager for asyncio servers.
+
+ This class implements a Redis backend for event sharing across multiple
+ processes.
+
+ To use a Redis backend, initialize the :class:`AsyncServer` instance as
+ follows::
+
+ url = 'redis://hostname:port/0'
+ server = socketio.AsyncServer(
+ client_manager=socketio.AsyncRedisManager(url))
+
+ :param url: The connection URL for the Redis server. For a default Redis
+ store running on the same host, use ``redis://``. To use an
+ SSL connection, use ``rediss://``.
+ :param channel: The channel name on which the server sends and receives
+ notifications. Must be the same in all the servers.
+ :param write_only: If set to ``True``, only initialize to emit events. The
+ default of ``False`` initializes the class for emitting
+ and receiving.
+ :param redis_options: additional keyword arguments to be passed to
+ ``aioredis.from_url()``.
+ """
+ name = 'aioredis'
+
+ def __init__(self, url='redis://localhost:6379/0', channel='socketio',
+ write_only=False, logger=None, redis_options=None):
+ if aioredis is None:
+ raise RuntimeError('Redis package is not installed '
+ '(Run "pip install redis" in your virtualenv).')
+ if not hasattr(aioredis.Redis, 'from_url'):
+ raise RuntimeError('Version 2 of aioredis package is required.')
+ self.redis_url = url
+ self.redis_options = redis_options or {}
+ self._redis_connect()
+ super().__init__(channel=channel, write_only=write_only, logger=logger)
+
+ def _redis_connect(self):
+ self.redis = aioredis.Redis.from_url(self.redis_url,
+ **self.redis_options)
+ self.pubsub = self.redis.pubsub(ignore_subscribe_messages=True)
+
+ async def _publish(self, data):
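+        # if the publish fails, reconnect to Redis and retry once before
+        # giving up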
+ retry = True
+ while True:
+ try:
+ if not retry:
+ self._redis_connect()
+ return await self.redis.publish(
+ self.channel, pickle.dumps(data))
+ except RedisError:
+ if retry:
+ self._get_logger().error('Cannot publish to redis... '
+ 'retrying')
+ retry = False
+ else:
+ self._get_logger().error('Cannot publish to redis... '
+ 'giving up')
+ break
+
+ async def _redis_listen_with_retries(self):
+ retry_sleep = 1
+ connect = False
+ while True:
+ try:
+ if connect:
+ self._redis_connect()
+ await self.pubsub.subscribe(self.channel)
+ retry_sleep = 1
+ async for message in self.pubsub.listen():
+ yield message
+ except RedisError:
+ self._get_logger().error('Cannot receive from redis... '
+ 'retrying in '
+ '{} secs'.format(retry_sleep))
+ connect = True
+ await asyncio.sleep(retry_sleep)
+ retry_sleep *= 2
+ if retry_sleep > 60:
+ retry_sleep = 60
+
+ async def _listen(self):
+ channel = self.channel.encode('utf-8')
+ await self.pubsub.subscribe(self.channel)
+ async for message in self._redis_listen_with_retries():
+ if message['channel'] == channel and \
+ message['type'] == 'message' and 'data' in message:
+ yield message['data']
+ await self.pubsub.unsubscribe(self.channel)
diff --git a/.venv/Lib/site-packages/socketio/async_server.py b/.venv/Lib/site-packages/socketio/async_server.py
new file mode 100644
index 0000000..1e523ff
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/async_server.py
@@ -0,0 +1,697 @@
+import asyncio
+
+import engineio
+
+from . import async_manager
+from . import base_server
+from . import exceptions
+from . import packet
+
+# this set is used to keep references to background tasks to prevent them from
+# being garbage collected mid-execution. Solution taken from
+# https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task
+task_reference_holder = set()
+
+
+class AsyncServer(base_server.BaseServer):
+ """A Socket.IO server for asyncio.
+
+ This class implements a fully compliant Socket.IO web server with support
+ for websocket and long-polling transports, compatible with the asyncio
+ framework.
+
+ :param client_manager: The client manager instance that will manage the
+ client list. When this is omitted, the client list
+ is stored in an in-memory structure, so the use of
+ multiple connected servers is not possible.
+ :param logger: To enable logging set to ``True`` or pass a logger object to
+ use. To disable logging set to ``False``. Note that fatal
+ errors are logged even when ``logger`` is ``False``.
+ :param json: An alternative json module to use for encoding and decoding
+ packets. Custom json modules must have ``dumps`` and ``loads``
+ functions that are compatible with the standard library
+ versions.
+ :param async_handlers: If set to ``True``, event handlers for a client are
+ executed in separate threads. To run handlers for a
+ client synchronously, set to ``False``. The default
+ is ``True``.
+ :param always_connect: When set to ``False``, new connections are
+                           provisional until the connect handler returns
+ something other than ``False``, at which point they
+ are accepted. When set to ``True``, connections are
+ immediately accepted, and then if the connect
+ handler returns ``False`` a disconnect is issued.
+ Set to ``True`` if you need to emit events from the
+ connect handler and your client is confused when it
+ receives events before the connection acceptance.
+ In any other case use the default of ``False``.
+ :param namespaces: a list of namespaces that are accepted, in addition to
+ any namespaces for which handlers have been defined. The
+                       default is ``['/']``, which always accepts connections
+                       to the default namespace. Set to ``'*'`` to accept all
+ namespaces.
+ :param kwargs: Connection parameters for the underlying Engine.IO server.
+
+ The Engine.IO configuration supports the following settings:
+
+ :param async_mode: The asynchronous model to use. See the Deployment
+ section in the documentation for a description of the
+ available options. Valid async modes are "aiohttp",
+ "sanic", "tornado" and "asgi". If this argument is not
+ given, "aiohttp" is tried first, followed by "sanic",
+ "tornado", and finally "asgi". The first async mode that
+ has all its dependencies installed is the one that is
+ chosen.
+ :param ping_interval: The interval in seconds at which the server pings
+ the client. The default is 25 seconds. For advanced
+ control, a two element tuple can be given, where
+ the first number is the ping interval and the second
+ is a grace period added by the server.
+ :param ping_timeout: The time in seconds that the client waits for the
+ server to respond before disconnecting. The default
+ is 20 seconds.
+ :param max_http_buffer_size: The maximum size that is accepted for incoming
+ messages. The default is 1,000,000 bytes. In
+ spite of its name, the value set in this
+ argument is enforced for HTTP long-polling and
+ WebSocket connections.
+ :param allow_upgrades: Whether to allow transport upgrades or not. The
+ default is ``True``.
+ :param http_compression: Whether to compress packages when using the
+ polling transport. The default is ``True``.
+ :param compression_threshold: Only compress messages when their byte size
+ is greater than this value. The default is
+ 1024 bytes.
+ :param cookie: If set to a string, it is the name of the HTTP cookie the
+ server sends back to the client containing the client
+ session id. If set to a dictionary, the ``'name'`` key
+ contains the cookie name and other keys define cookie
+ attributes, where the value of each attribute can be a
+ string, a callable with no arguments, or a boolean. If set
+ to ``None`` (the default), a cookie is not sent to the
+ client.
+ :param cors_allowed_origins: Origin or list of origins that are allowed to
+ connect to this server. Only the same origin
+ is allowed by default. Set this argument to
+ ``'*'`` to allow all origins, or to ``[]`` to
+ disable CORS handling.
+ :param cors_credentials: Whether credentials (cookies, authentication) are
+ allowed in requests to this server. The default is
+ ``True``.
+ :param monitor_clients: If set to ``True``, a background task will ensure
+ inactive clients are closed. Set to ``False`` to
+ disable the monitoring task (not recommended). The
+ default is ``True``.
+ :param transports: The list of allowed transports. Valid transports
+ are ``'polling'`` and ``'websocket'``. Defaults to
+ ``['polling', 'websocket']``.
+ :param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
+ a logger object to use. To disable logging set to
+ ``False``. The default is ``False``. Note that
+ fatal errors are logged even when
+ ``engineio_logger`` is ``False``.
+ """
+ def __init__(self, client_manager=None, logger=False, json=None,
+ async_handlers=True, namespaces=None, **kwargs):
+ if client_manager is None:
+ client_manager = async_manager.AsyncManager()
+ super().__init__(client_manager=client_manager, logger=logger,
+ json=json, async_handlers=async_handlers,
+ namespaces=namespaces, **kwargs)
+
+ def is_asyncio_based(self):
+ return True
+
+ def attach(self, app, socketio_path='socket.io'):
+ """Attach the Socket.IO server to an application."""
+ self.eio.attach(app, socketio_path)
+
+ async def emit(self, event, data=None, to=None, room=None, skip_sid=None,
+ namespace=None, callback=None, ignore_queue=False):
+ """Emit a custom event to one or more connected clients.
+
+ :param event: The event name. It can be any string. The event names
+ ``'connect'``, ``'message'`` and ``'disconnect'`` are
+ reserved and should not be used.
+ :param data: The data to send to the client or clients. Data can be of
+ type ``str``, ``bytes``, ``list`` or ``dict``. To send
+ multiple arguments, use a tuple where each element is of
+ one of the types indicated above.
+ :param to: The recipient of the message. This can be set to the
+                   session ID of a client to address only that client, to any
+                   custom room created by the application to address all
+                   the clients in that room, or to a list of custom room
+                   names. If this argument is omitted the event is broadcast
+ to all connected clients.
+ :param room: Alias for the ``to`` parameter.
+ :param skip_sid: The session ID of a client to skip when broadcasting
+ to a room or to all clients. This can be used to
+ prevent a message from being sent to the sender.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the event is emitted to the
+ default namespace.
+ :param callback: If given, this function will be called to acknowledge
+ the client has received the message. The arguments
+ that will be passed to the function are those provided
+ by the client. Callback functions can only be used
+ when addressing an individual client.
+ :param ignore_queue: Only used when a message queue is configured. If
+ set to ``True``, the event is emitted to the
+ clients directly, without going through the queue.
+ This is more efficient, but only works when a
+ single server process is used. It is recommended
+ to always leave this parameter with its default
+ value of ``False``.
+
+ Note: this method is not designed to be used concurrently. If multiple
+ tasks are emitting at the same time to the same client connection, then
+ messages composed of multiple packets may end up being sent in an
+ incorrect sequence. Use standard concurrency solutions (such as a Lock
+ object) to prevent this situation.
+
+ Note 2: this method is a coroutine.
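+
+        A minimal sketch (the event name and room are illustrative, and
+        ``sender_sid`` is assumed to hold the originating client's session
+        ID)::
+
+            await sio.emit('update', {'value': 42}, to='room1',
+                           skip_sid=sender_sid)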
+ """
+ namespace = namespace or '/'
+ room = to or room
+ self.logger.info('emitting event "%s" to %s [%s]', event,
+ room or 'all', namespace)
+ await self.manager.emit(event, data, namespace, room=room,
+ skip_sid=skip_sid, callback=callback,
+ ignore_queue=ignore_queue)
+
+ async def send(self, data, to=None, room=None, skip_sid=None,
+ namespace=None, callback=None, ignore_queue=False):
+ """Send a message to one or more connected clients.
+
+ This function emits an event with the name ``'message'``. Use
+ :func:`emit` to issue custom event names.
+
+ :param data: The data to send to the client or clients. Data can be of
+ type ``str``, ``bytes``, ``list`` or ``dict``. To send
+ multiple arguments, use a tuple where each element is of
+ one of the types indicated above.
+ :param to: The recipient of the message. This can be set to the
+                   session ID of a client to address only that client, to any
+                   custom room created by the application to address all
+                   the clients in that room, or to a list of custom room
+                   names. If this argument is omitted the event is broadcast
+ to all connected clients.
+ :param room: Alias for the ``to`` parameter.
+ :param skip_sid: The session ID of a client to skip when broadcasting
+ to a room or to all clients. This can be used to
+ prevent a message from being sent to the sender.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the event is emitted to the
+ default namespace.
+ :param callback: If given, this function will be called to acknowledge
+ the client has received the message. The arguments
+ that will be passed to the function are those provided
+ by the client. Callback functions can only be used
+ when addressing an individual client.
+ :param ignore_queue: Only used when a message queue is configured. If
+ set to ``True``, the event is emitted to the
+ clients directly, without going through the queue.
+ This is more efficient, but only works when a
+ single server process is used. It is recommended
+ to always leave this parameter with its default
+ value of ``False``.
+
+ Note: this method is a coroutine.
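+
+ Example usage (a sketch; ``sio`` is the server instance and ``sid`` a
+ connected client's session ID)::
+
+     await sio.send('hello', to=sid)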
+ """
+ await self.emit('message', data=data, to=to, room=room,
+ skip_sid=skip_sid, namespace=namespace,
+ callback=callback, ignore_queue=ignore_queue)
+
+ async def call(self, event, data=None, to=None, sid=None, namespace=None,
+ timeout=60, ignore_queue=False):
+ """Emit a custom event to a client and wait for the response.
+
+ This method issues an emit with a callback and waits for the callback
+ to be invoked before returning. If the callback isn't invoked before
+ the timeout, then a ``TimeoutError`` exception is raised. If the
+ Socket.IO connection drops during the wait, this method still waits
+ until the specified timeout.
+
+ :param event: The event name. It can be any string. The event names
+ ``'connect'``, ``'message'`` and ``'disconnect'`` are
+ reserved and should not be used.
+ :param data: The data to send to the client or clients. Data can be of
+ type ``str``, ``bytes``, ``list`` or ``dict``. To send
+ multiple arguments, use a tuple where each element is of
+ one of the types indicated above.
+ :param to: The session ID of the recipient client.
+ :param sid: Alias for the ``to`` parameter.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the event is emitted to the
+ default namespace.
+ :param timeout: The waiting timeout. If the timeout is reached before
+ the client acknowledges the event, then a
+ ``TimeoutError`` exception is raised.
+ :param ignore_queue: Only used when a message queue is configured. If
+ set to ``True``, the event is emitted to the
+ client directly, without going through the queue.
+ This is more efficient, but only works when a
+ single server process is used. It is recommended
+ to always leave this parameter with its default
+ value of ``False``.
+
+ Note: this method is not designed to be used concurrently. If multiple
+ tasks are emitting at the same time to the same client connection, then
+ messages composed of multiple packets may end up being sent in an
+ incorrect sequence. Use standard concurrency solutions (such as a Lock
+ object) to prevent this situation.
+
+ Note 2: this method is a coroutine.
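+
+ Example usage (a sketch; ``sio`` is the server instance and ``sid`` a
+ connected client's session ID)::
+
+     try:
+         reply = await sio.call('get_status', to=sid, timeout=10)
+     except socketio.exceptions.TimeoutError:
+         reply = None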
+ """
+ if to is None and sid is None:
+ raise ValueError('Cannot use call() to broadcast.')
+ if not self.async_handlers:
+ raise RuntimeError(
+ 'Cannot use call() when async_handlers is False.')
+ callback_event = self.eio.create_event()
+ callback_args = []
+
+ def event_callback(*args):
+ callback_args.append(args)
+ callback_event.set()
+
+ await self.emit(event, data=data, room=to or sid, namespace=namespace,
+ callback=event_callback, ignore_queue=ignore_queue)
+ try:
+ await asyncio.wait_for(callback_event.wait(), timeout)
+ except asyncio.TimeoutError:
+ raise exceptions.TimeoutError() from None
+ return callback_args[0] if len(callback_args[0]) > 1 \
+ else callback_args[0][0] if len(callback_args[0]) == 1 \
+ else None
+
+ async def enter_room(self, sid, room, namespace=None):
+ """Enter a room.
+
+ This function adds the client to a room. The :func:`emit` and
+ :func:`send` functions can optionally broadcast events to all the
+ clients in a room.
+
+ :param sid: Session ID of the client.
+ :param room: Room name. If the room does not exist it is created.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the default namespace is used.
+
+ Note: this method is a coroutine.
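+
+ Example usage (a sketch; ``sio`` is assumed to be the server
+ instance)::
+
+     await sio.enter_room(sid, 'chat')
+     await sio.emit('announcement', 'a user joined', room='chat')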
+ """
+ namespace = namespace or '/'
+ self.logger.info('%s is entering room %s [%s]', sid, room, namespace)
+ await self.manager.enter_room(sid, namespace, room)
+
+ async def leave_room(self, sid, room, namespace=None):
+ """Leave a room.
+
+ This function removes the client from a room.
+
+ :param sid: Session ID of the client.
+ :param room: Room name.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the default namespace is used.
+
+ Note: this method is a coroutine.
+ """
+ namespace = namespace or '/'
+ self.logger.info('%s is leaving room %s [%s]', sid, room, namespace)
+ await self.manager.leave_room(sid, namespace, room)
+
+ async def close_room(self, room, namespace=None):
+ """Close a room.
+
+ This function removes all the clients from the given room.
+
+ :param room: Room name.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the default namespace is used.
+
+ Note: this method is a coroutine.
+ """
+ namespace = namespace or '/'
+ self.logger.info('room %s is closing [%s]', room, namespace)
+ await self.manager.close_room(room, namespace)
+
+ async def get_session(self, sid, namespace=None):
+ """Return the user session for a client.
+
+ :param sid: The session id of the client.
+ :param namespace: The Socket.IO namespace. If this argument is omitted
+ the default namespace is used.
+
+ The return value is a dictionary. Modifications made to this
+ dictionary are not guaranteed to be preserved. If you want to modify
+ the user session, use the ``session`` context manager instead.
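+
+ Example usage (a read-only sketch; use the ``session`` context
+ manager to persist changes)::
+
+     session = await sio.get_session(sid)
+     username = session.get('username')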
+ """
+ namespace = namespace or '/'
+ eio_sid = self.manager.eio_sid_from_sid(sid, namespace)
+ eio_session = await self.eio.get_session(eio_sid)
+ return eio_session.setdefault(namespace, {})
+
+ async def save_session(self, sid, session, namespace=None):
+ """Store the user session for a client.
+
+ :param sid: The session id of the client.
+ :param session: The session dictionary.
+ :param namespace: The Socket.IO namespace. If this argument is omitted
+ the default namespace is used.
+ """
+ namespace = namespace or '/'
+ eio_sid = self.manager.eio_sid_from_sid(sid, namespace)
+ eio_session = await self.eio.get_session(eio_sid)
+ eio_session[namespace] = session
+
+ def session(self, sid, namespace=None):
+ """Return the user session for a client with context manager syntax.
+
+ :param sid: The session id of the client.
+
+ This is a context manager that returns the user session dictionary for
+ the client. Any changes that are made to this dictionary inside the
+ context manager block are saved back to the session. Example usage::
+
+ @sio.on('connect')
+ async def on_connect(sid, environ):
+     username = authenticate_user(environ)
+     if not username:
+         return False
+     async with sio.session(sid) as session:
+         session['username'] = username
+
+ @sio.on('message')
+ async def on_message(sid, msg):
+     async with sio.session(sid) as session:
+         print('received message from ', session['username'])
+ """
+ class _session_context_manager(object):
+ def __init__(self, server, sid, namespace):
+ self.server = server
+ self.sid = sid
+ self.namespace = namespace
+ self.session = None
+
+ async def __aenter__(self):
+     self.session = await self.server.get_session(
+         self.sid, namespace=self.namespace)
+     return self.session
+
+ async def __aexit__(self, *args):
+     await self.server.save_session(self.sid, self.session,
+                                    namespace=self.namespace)
+
+ return _session_context_manager(self, sid, namespace)
+
+ async def disconnect(self, sid, namespace=None, ignore_queue=False):
+ """Disconnect a client.
+
+ :param sid: Session ID of the client.
+ :param namespace: The Socket.IO namespace to disconnect. If this
+ argument is omitted the default namespace is used.
+ :param ignore_queue: Only used when a message queue is configured. If
+ set to ``True``, the disconnect is processed
+ locally, without broadcasting on the queue. It is
+ recommended to always leave this parameter with
+ its default value of ``False``.
+
+ Note: this method is a coroutine.
+ """
+ namespace = namespace or '/'
+ if ignore_queue:
+ delete_it = self.manager.is_connected(sid, namespace)
+ else:
+ delete_it = await self.manager.can_disconnect(sid, namespace)
+ if delete_it:
+ self.logger.info('Disconnecting %s [%s]', sid, namespace)
+ eio_sid = self.manager.pre_disconnect(sid, namespace=namespace)
+ await self._send_packet(eio_sid, self.packet_class(
+ packet.DISCONNECT, namespace=namespace))
+ await self._trigger_event('disconnect', namespace, sid)
+ await self.manager.disconnect(sid, namespace=namespace,
+ ignore_queue=True)
+
+ async def shutdown(self):
+ """Stop Socket.IO background tasks.
+
+ This method stops all background activity initiated by the Socket.IO
+ server. It must be called before shutting down the web server.
+ """
+ self.logger.info('Socket.IO is shutting down')
+ await self.eio.shutdown()
+
+ async def handle_request(self, *args, **kwargs):
+ """Handle an HTTP request from the client.
+
+ This is the entry point of the Socket.IO application. This function
+ returns the HTTP response body to deliver to the client.
+
+ Note: this method is a coroutine.
+ """
+ return await self.eio.handle_request(*args, **kwargs)
+
+ def start_background_task(self, target, *args, **kwargs):
+ """Start a background task using the appropriate async model.
+
+ This is a utility function that applications can use to start a
+ background task using the method that is compatible with the
+ selected async mode.
+
+ :param target: the target function to execute. Must be a coroutine.
+ :param args: arguments to pass to the function.
+ :param kwargs: keyword arguments to pass to the function.
+
+ The return value is an ``asyncio.Task`` object.
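+
+ Example usage (a sketch; ``sio`` is assumed to be the server
+ instance)::
+
+     async def ticker():
+         while True:
+             await sio.sleep(5)
+             await sio.emit('tick')
+
+     task = sio.start_background_task(ticker)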
+ """
+ return self.eio.start_background_task(target, *args, **kwargs)
+
+ async def sleep(self, seconds=0):
+ """Sleep for the requested amount of time using the appropriate async
+ model.
+
+ This is a utility function that applications can use to put a task to
+ sleep without having to worry about using the correct call for the
+ selected async mode.
+
+ Note: this method is a coroutine.
+ """
+ return await self.eio.sleep(seconds)
+
+ def instrument(self, auth=None, mode='development', read_only=False,
+ server_id=None, namespace='/admin',
+ server_stats_interval=2):
+ """Instrument the Socket.IO server for monitoring with the `Socket.IO
+ Admin UI `_.
+
+ :param auth: Authentication credentials for Admin UI access. Set to a
+ dictionary with the expected login (usually ``username``
+ and ``password``) or a list of dictionaries if more than
+ one set of credentials need to be available. For more
+ complex authentication methods, set to a callable that
+ receives the authentication dictionary as an argument and
+ returns ``True`` if the user is allowed or ``False``
+ otherwise. To disable authentication, set this argument to
+ ``False`` (not recommended, never do this on a production
+ server).
+ :param mode: The reporting mode. The default is ``'development'``,
+ which is best used while debugging, as it may have a
+ significant performance effect. Set to ``'production'`` to
+ reduce the amount of information that is reported to the
+ admin UI.
+ :param read_only: If set to ``True``, the admin interface will be
+ read-only, with no option to modify room assignments
+ or disconnect clients. The default is ``False``.
+ :param server_id: The server name to use for this server. If this
+ argument is omitted, the server generates its own
+ name.
+ :param namespace: The Socket.IO namespace to use for the admin
+ interface. The default is ``/admin``.
+ :param server_stats_interval: The interval in seconds at which the
+ server emits a summary of its stats to all
+ connected admins.
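+
+ Example usage (a sketch; the credentials shown are placeholders)::
+
+     sio.instrument(auth={'username': 'admin', 'password': 'secret'},
+                    mode='production')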
+ """
+ from .async_admin import InstrumentedAsyncServer
+ return InstrumentedAsyncServer(
+ self, auth=auth, mode=mode, read_only=read_only,
+ server_id=server_id, namespace=namespace,
+ server_stats_interval=server_stats_interval)
+
+ async def _send_packet(self, eio_sid, pkt):
+ """Send a Socket.IO packet to a client."""
+ encoded_packet = pkt.encode()
+ if isinstance(encoded_packet, list):
+ for ep in encoded_packet:
+ await self.eio.send(eio_sid, ep)
+ else:
+ await self.eio.send(eio_sid, encoded_packet)
+
+ async def _send_eio_packet(self, eio_sid, eio_pkt):
+ """Send a raw Engine.IO packet to a client."""
+ await self.eio.send_packet(eio_sid, eio_pkt)
+
+ async def _handle_connect(self, eio_sid, namespace, data):
+ """Handle a client connection request."""
+ namespace = namespace or '/'
+ sid = None
+ if namespace in self.handlers or namespace in self.namespace_handlers \
+ or self.namespaces == '*' or namespace in self.namespaces:
+ sid = await self.manager.connect(eio_sid, namespace)
+ if sid is None:
+ await self._send_packet(eio_sid, self.packet_class(
+ packet.CONNECT_ERROR, data='Unable to connect',
+ namespace=namespace))
+ return
+
+ if self.always_connect:
+ await self._send_packet(eio_sid, self.packet_class(
+ packet.CONNECT, {'sid': sid}, namespace=namespace))
+ fail_reason = exceptions.ConnectionRefusedError().error_args
+ try:
+ if data:
+ success = await self._trigger_event(
+ 'connect', namespace, sid, self.environ[eio_sid], data)
+ else:
+ try:
+ success = await self._trigger_event(
+ 'connect', namespace, sid, self.environ[eio_sid])
+ except TypeError:
+ success = await self._trigger_event(
+ 'connect', namespace, sid, self.environ[eio_sid], None)
+ except exceptions.ConnectionRefusedError as exc:
+ fail_reason = exc.error_args
+ success = False
+
+ if success is False:
+ if self.always_connect:
+ self.manager.pre_disconnect(sid, namespace)
+ await self._send_packet(eio_sid, self.packet_class(
+ packet.DISCONNECT, data=fail_reason, namespace=namespace))
+ else:
+ await self._send_packet(eio_sid, self.packet_class(
+ packet.CONNECT_ERROR, data=fail_reason,
+ namespace=namespace))
+ await self.manager.disconnect(sid, namespace, ignore_queue=True)
+ elif not self.always_connect:
+ await self._send_packet(eio_sid, self.packet_class(
+ packet.CONNECT, {'sid': sid}, namespace=namespace))
+
+ async def _handle_disconnect(self, eio_sid, namespace):
+ """Handle a client disconnect."""
+ namespace = namespace or '/'
+ sid = self.manager.sid_from_eio_sid(eio_sid, namespace)
+ if not self.manager.is_connected(sid, namespace): # pragma: no cover
+ return
+ self.manager.pre_disconnect(sid, namespace=namespace)
+ await self._trigger_event('disconnect', namespace, sid)
+ await self.manager.disconnect(sid, namespace, ignore_queue=True)
+
+ async def _handle_event(self, eio_sid, namespace, id, data):
+ """Handle an incoming client event."""
+ namespace = namespace or '/'
+ sid = self.manager.sid_from_eio_sid(eio_sid, namespace)
+ self.logger.info('received event "%s" from %s [%s]', data[0], sid,
+ namespace)
+ if not self.manager.is_connected(sid, namespace):
+ self.logger.warning('%s is not connected to namespace %s',
+ sid, namespace)
+ return
+ if self.async_handlers:
+ task = self.start_background_task(
+ self._handle_event_internal, self, sid, eio_sid, data,
+ namespace, id)
+ task_reference_holder.add(task)
+ task.add_done_callback(task_reference_holder.discard)
+ else:
+ await self._handle_event_internal(self, sid, eio_sid, data,
+ namespace, id)
+
+ async def _handle_event_internal(self, server, sid, eio_sid, data,
+ namespace, id):
+ r = await server._trigger_event(data[0], namespace, sid, *data[1:])
+ if r != self.not_handled and id is not None:
+ # send ACK packet with the response returned by the handler
+ # tuples are expanded as multiple arguments
+ if r is None:
+ data = []
+ elif isinstance(r, tuple):
+ data = list(r)
+ else:
+ data = [r]
+ await server._send_packet(eio_sid, self.packet_class(
+ packet.ACK, namespace=namespace, id=id, data=data))
+
+ async def _handle_ack(self, eio_sid, namespace, id, data):
+ """Handle ACK packets from the client."""
+ namespace = namespace or '/'
+ sid = self.manager.sid_from_eio_sid(eio_sid, namespace)
+ self.logger.info('received ack from %s [%s]', sid, namespace)
+ await self.manager.trigger_callback(sid, id, data)
+
+ async def _trigger_event(self, event, namespace, *args):
+ """Invoke an application event handler."""
+ # first see if we have an explicit handler for the event
+ handler, args = self._get_event_handler(event, namespace, args)
+ if handler:
+ if asyncio.iscoroutinefunction(handler):
+ try:
+ ret = await handler(*args)
+ except asyncio.CancelledError: # pragma: no cover
+ ret = None
+ else:
+ ret = handler(*args)
+ return ret
+ # or else, forward the event to a namespace handler if one exists
+ handler, args = self._get_namespace_handler(namespace, args)
+ if handler:
+ return await handler.trigger_event(event, *args)
+ else:
+ return self.not_handled
+
+ async def _handle_eio_connect(self, eio_sid, environ):
+ """Handle the Engine.IO connection event."""
+ if not self.manager_initialized:
+ self.manager_initialized = True
+ self.manager.initialize()
+ self.environ[eio_sid] = environ
+
+ async def _handle_eio_message(self, eio_sid, data):
+ """Dispatch Engine.IO messages."""
+ if eio_sid in self._binary_packet:
+ pkt = self._binary_packet[eio_sid]
+ if pkt.add_attachment(data):
+ del self._binary_packet[eio_sid]
+ if pkt.packet_type == packet.BINARY_EVENT:
+ await self._handle_event(eio_sid, pkt.namespace, pkt.id,
+ pkt.data)
+ else:
+ await self._handle_ack(eio_sid, pkt.namespace, pkt.id,
+ pkt.data)
+ else:
+ pkt = self.packet_class(encoded_packet=data)
+ if pkt.packet_type == packet.CONNECT:
+ await self._handle_connect(eio_sid, pkt.namespace, pkt.data)
+ elif pkt.packet_type == packet.DISCONNECT:
+ await self._handle_disconnect(eio_sid, pkt.namespace)
+ elif pkt.packet_type == packet.EVENT:
+ await self._handle_event(eio_sid, pkt.namespace, pkt.id,
+ pkt.data)
+ elif pkt.packet_type == packet.ACK:
+ await self._handle_ack(eio_sid, pkt.namespace, pkt.id,
+ pkt.data)
+ elif pkt.packet_type == packet.BINARY_EVENT or \
+ pkt.packet_type == packet.BINARY_ACK:
+ self._binary_packet[eio_sid] = pkt
+ elif pkt.packet_type == packet.CONNECT_ERROR:
+ raise ValueError('Unexpected CONNECT_ERROR packet.')
+ else:
+ raise ValueError('Unknown packet type.')
+
+ async def _handle_eio_disconnect(self, eio_sid):
+ """Handle Engine.IO disconnect event."""
+ for n in list(self.manager.get_namespaces()).copy():
+ await self._handle_disconnect(eio_sid, n)
+ if eio_sid in self.environ:
+ del self.environ[eio_sid]
+
+ def _engineio_server_class(self):
+ return engineio.AsyncServer
diff --git a/.venv/Lib/site-packages/socketio/async_simple_client.py b/.venv/Lib/site-packages/socketio/async_simple_client.py
new file mode 100644
index 0000000..c6cd4fc
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/async_simple_client.py
@@ -0,0 +1,209 @@
+import asyncio
+from socketio import AsyncClient
+from socketio.exceptions import SocketIOError, TimeoutError, DisconnectedError
+
+
+class AsyncSimpleClient:
+ """A Socket.IO client.
+
+ This class implements a simple, yet fully compliant Socket.IO web client
+ with support for websocket and long-polling transports.
+
+ The positional and keyword arguments given in the constructor are passed
+ to the underlying :func:`socketio.AsyncClient` object.
+ """
+ def __init__(self, *args, **kwargs):
+ self.client_args = args
+ self.client_kwargs = kwargs
+ self.client = None
+ self.namespace = '/'
+ self.connected_event = asyncio.Event()
+ self.connected = False
+ self.input_event = asyncio.Event()
+ self.input_buffer = []
+
+ async def connect(self, url, headers={}, auth=None, transports=None,
+ namespace='/', socketio_path='socket.io',
+ wait_timeout=5):
+ """Connect to a Socket.IO server.
+
+ :param url: The URL of the Socket.IO server. It can include custom
+ query string parameters if required by the server. If a
+ function is provided, the client will invoke it to obtain
+ the URL each time a connection or reconnection is
+ attempted.
+ :param headers: A dictionary with custom headers to send with the
+ connection request. If a function is provided, the
+ client will invoke it to obtain the headers dictionary
+ each time a connection or reconnection is attempted.
+ :param auth: Authentication data passed to the server with the
+ connection request, normally a dictionary with one or
+ more string key/value pairs. If a function is provided,
+ the client will invoke it to obtain the authentication
+ data each time a connection or reconnection is attempted.
+ :param transports: The list of allowed transports. Valid transports
+ are ``'polling'`` and ``'websocket'``. If not
+ given, the polling transport is connected first,
+ then an upgrade to websocket is attempted.
+ :param namespace: The namespace to connect to as a string. If not
+ given, the default namespace ``/`` is used.
+ :param socketio_path: The endpoint where the Socket.IO server is
+ installed. The default value is appropriate for
+ most cases.
+ :param wait_timeout: How long the client should wait for the
+ connection. The default is 5 seconds.
+
+ Note: this method is a coroutine.
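+
+ Example usage (a sketch; the URL is a placeholder)::
+
+     client = socketio.AsyncSimpleClient()
+     await client.connect('http://localhost:5000')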
+ """
+ if self.connected:
+ raise RuntimeError('Already connected')
+ self.namespace = namespace
+ self.input_buffer = []
+ self.input_event.clear()
+ self.client = AsyncClient(*self.client_args, **self.client_kwargs)
+
+ @self.client.event(namespace=self.namespace)
+ def connect(): # pragma: no cover
+ self.connected = True
+ self.connected_event.set()
+
+ @self.client.event(namespace=self.namespace)
+ def disconnect(): # pragma: no cover
+ self.connected_event.clear()
+
+ @self.client.event(namespace=self.namespace)
+ def __disconnect_final(): # pragma: no cover
+ self.connected = False
+ self.connected_event.set()
+
+ @self.client.on('*', namespace=self.namespace)
+ def on_event(event, *args): # pragma: no cover
+ self.input_buffer.append([event, *args])
+ self.input_event.set()
+
+ await self.client.connect(
+ url, headers=headers, auth=auth, transports=transports,
+ namespaces=[namespace], socketio_path=socketio_path,
+ wait_timeout=wait_timeout)
+
+ @property
+ def sid(self):
+ """The session ID received from the server.
+
+ The session ID is not guaranteed to remain constant throughout the life
+ of the connection, as reconnections can cause it to change.
+ """
+ return self.client.get_sid(self.namespace) if self.client else None
+
+ @property
+ def transport(self):
+ """The name of the transport currently in use.
+
+ The transport is returned as a string and can be one of ``polling``
+ and ``websocket``.
+ """
+ return self.client.transport if self.client else ''
+
+ async def emit(self, event, data=None):
+ """Emit an event to the server.
+
+ :param event: The event name. It can be any string. The event names
+ ``'connect'``, ``'message'`` and ``'disconnect'`` are
+ reserved and should not be used.
+ :param data: The data to send to the server. Data can be of
+ type ``str``, ``bytes``, ``list`` or ``dict``. To send
+ multiple arguments, use a tuple where each element is of
+ one of the types indicated above.
+
+ Note: this method is a coroutine.
+
+ This method schedules the event to be sent out and returns, without
+ actually waiting for its delivery. In cases where the client needs to
+ ensure that the event was received, :func:`socketio.AsyncSimpleClient.call`
+ should be used instead.
+ """
+ while True:
+ await self.connected_event.wait()
+ if not self.connected:
+ raise DisconnectedError()
+ try:
+ return await self.client.emit(event, data,
+ namespace=self.namespace)
+ except SocketIOError:
+ pass
+
+ async def call(self, event, data=None, timeout=60):
+ """Emit an event to the server and wait for a response.
+
+ This method issues an emit and waits for the server to provide a
+ response or acknowledgement. If the response does not arrive before the
+ timeout, then a ``TimeoutError`` exception is raised.
+
+ :param event: The event name. It can be any string. The event names
+ ``'connect'``, ``'message'`` and ``'disconnect'`` are
+ reserved and should not be used.
+ :param data: The data to send to the server. Data can be of
+ type ``str``, ``bytes``, ``list`` or ``dict``. To send
+ multiple arguments, use a tuple where each element is of
+ one of the types indicated above.
+ :param timeout: The waiting timeout. If the timeout is reached before
+ the server acknowledges the event, then a
+ ``TimeoutError`` exception is raised.
+
+ Note: this method is a coroutine.
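+
+ Example usage (a sketch, assuming a connected ``client``)::
+
+     reply = await client.call('sum', (1, 2), timeout=5)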
+ """
+ while True:
+ await self.connected_event.wait()
+ if not self.connected:
+ raise DisconnectedError()
+ try:
+ return await self.client.call(event, data,
+ namespace=self.namespace,
+ timeout=timeout)
+ except SocketIOError:
+ pass
+
+ async def receive(self, timeout=None):
+ """Wait for an event from the server.
+
+ :param timeout: The waiting timeout. If the timeout is reached before
+ an event is received, then a ``TimeoutError``
+ exception is raised.
+
+ Note: this method is a coroutine.
+
+ The return value is a list with the event name as the first element. If
+ the server included arguments with the event, they are returned as
+ additional list elements.
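+
+ Example usage (a sketch, assuming a connected ``client``)::
+
+     event = await client.receive(timeout=10)
+     name, args = event[0], event[1:]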
+ """
+ while not self.input_buffer:
+ try:
+ await asyncio.wait_for(self.connected_event.wait(),
+ timeout=timeout)
+ except asyncio.TimeoutError: # pragma: no cover
+ raise TimeoutError()
+ if not self.connected:
+ raise DisconnectedError()
+ try:
+ await asyncio.wait_for(self.input_event.wait(),
+ timeout=timeout)
+ except asyncio.TimeoutError:
+ raise TimeoutError()
+ self.input_event.clear()
+ return self.input_buffer.pop(0)
+
+ async def disconnect(self):
+ """Disconnect from the server.
+
+ Note: this method is a coroutine.
+ """
+ if self.connected:
+ await self.client.disconnect()
+ self.client = None
+ self.connected = False
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ await self.disconnect()
diff --git a/.venv/Lib/site-packages/socketio/base_client.py b/.venv/Lib/site-packages/socketio/base_client.py
new file mode 100644
index 0000000..1becf91
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/base_client.py
@@ -0,0 +1,292 @@
+import itertools
+import logging
+import signal
+import threading
+
+from . import base_namespace
+from . import packet
+
+default_logger = logging.getLogger('socketio.client')
+reconnecting_clients = []
+
+
+def signal_handler(sig, frame): # pragma: no cover
+ """SIGINT handler.
+
+ Notify any clients that are in a reconnect loop to abort. Other
+ disconnection tasks are handled at the engine.io level.
+ """
+ for client in reconnecting_clients[:]:
+ client._reconnect_abort.set()
+ if callable(original_signal_handler):
+ return original_signal_handler(sig, frame)
+ else: # pragma: no cover
+ # Handle case where no original SIGINT handler was present.
+ return signal.default_int_handler(sig, frame)
+
+
+original_signal_handler = None
+
+
+class BaseClient:
+ reserved_events = ['connect', 'connect_error', 'disconnect',
+ '__disconnect_final']
+
+ def __init__(self, reconnection=True, reconnection_attempts=0,
+ reconnection_delay=1, reconnection_delay_max=5,
+ randomization_factor=0.5, logger=False, serializer='default',
+ json=None, handle_sigint=True, **kwargs):
+ global original_signal_handler
+ if handle_sigint and original_signal_handler is None and \
+ threading.current_thread() == threading.main_thread():
+ original_signal_handler = signal.signal(signal.SIGINT,
+ signal_handler)
+ self.reconnection = reconnection
+ self.reconnection_attempts = reconnection_attempts
+ self.reconnection_delay = reconnection_delay
+ self.reconnection_delay_max = reconnection_delay_max
+ self.randomization_factor = randomization_factor
+ self.handle_sigint = handle_sigint
+
+ engineio_options = kwargs
+ engineio_options['handle_sigint'] = handle_sigint
+ engineio_logger = engineio_options.pop('engineio_logger', None)
+ if engineio_logger is not None:
+ engineio_options['logger'] = engineio_logger
+ if serializer == 'default':
+ self.packet_class = packet.Packet
+ elif serializer == 'msgpack':
+ from . import msgpack_packet
+ self.packet_class = msgpack_packet.MsgPackPacket
+ else:
+ self.packet_class = serializer
+ if json is not None:
+ self.packet_class.json = json
+ engineio_options['json'] = json
+
+ self.eio = self._engineio_client_class()(**engineio_options)
+ self.eio.on('connect', self._handle_eio_connect)
+ self.eio.on('message', self._handle_eio_message)
+ self.eio.on('disconnect', self._handle_eio_disconnect)
+
+ if not isinstance(logger, bool):
+ self.logger = logger
+ else:
+ self.logger = default_logger
+ if self.logger.level == logging.NOTSET:
+ if logger:
+ self.logger.setLevel(logging.INFO)
+ else:
+ self.logger.setLevel(logging.ERROR)
+ self.logger.addHandler(logging.StreamHandler())
+
+ self.connection_url = None
+ self.connection_headers = None
+ self.connection_auth = None
+ self.connection_transports = None
+ self.connection_namespaces = []
+ self.socketio_path = None
+ self.sid = None
+
+ self.connected = False #: Indicates if the client is connected or not.
+ self.namespaces = {} #: dict of connected namespaces and their sids.
+ self.handlers = {}
+ self.namespace_handlers = {}
+ self.callbacks = {}
+ self._binary_packet = None
+ self._connect_event = None
+ self._reconnect_task = None
+ self._reconnect_abort = None
+
+ def is_asyncio_based(self):
+ return False
+
+ def on(self, event, handler=None, namespace=None):
+ """Register an event handler.
+
+ :param event: The event name. It can be any string. The event names
+ ``'connect'``, ``'message'`` and ``'disconnect'`` are
+ reserved and should not be used. The ``'*'`` event name
+ can be used to define a catch-all event handler.
+ :param handler: The function that should be invoked to handle the
+ event. When this parameter is not given, the method
+ acts as a decorator for the handler function.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the handler is associated with
+ the default namespace. A catch-all namespace can be
+ defined by passing ``'*'`` as the namespace.
+
+ Example usage::
+
+ # as a decorator:
+ @sio.on('connect')
+ def connect_handler():
+ print('Connected!')
+
+ # as a method:
+ def message_handler(msg):
+ print('Received message: ', msg)
+ sio.send('response')
+ sio.on('message', message_handler)
+
+ The arguments passed to the handler function depend on the event type:
+
+ - The ``'connect'`` event handler does not take arguments.
+ - The ``'disconnect'`` event handler does not take arguments.
+ - The ``'message'`` handler and handlers for custom event names receive
+ the message payload as their only argument. Any values returned from a
+ message handler will be passed to the client's acknowledgement
+ callback function if it exists.
+ - A catch-all event handler receives the event name as first argument,
+ followed by any arguments specific to the event.
+ - A catch-all namespace event handler receives the namespace as first
+ argument, followed by any arguments specific to the event.
+ - A combined catch-all namespace and catch-all event handler receives
+ the event name as first argument and the namespace as second
+ argument, followed by any arguments specific to the event.
+ """
+ namespace = namespace or '/'
+
+ def set_handler(handler):
+ if namespace not in self.handlers:
+ self.handlers[namespace] = {}
+ self.handlers[namespace][event] = handler
+ return handler
+
+ if handler is None:
+ return set_handler
+ set_handler(handler)
+
+ def event(self, *args, **kwargs):
+ """Decorator to register an event handler.
+
+ This is a simplified version of the ``on()`` method that takes the
+ event name from the decorated function.
+
+ Example usage::
+
+ @sio.event
+ def my_event(data):
+ print('Received data: ', data)
+
+ The above example is equivalent to::
+
+ @sio.on('my_event')
+ def my_event(data):
+ print('Received data: ', data)
+
+ A custom namespace can be given as an argument to the decorator::
+
+ @sio.event(namespace='/test')
+ def my_event(data):
+ print('Received data: ', data)
+ """
+ if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
+ # the decorator was invoked without arguments
+ # args[0] is the decorated function
+ return self.on(args[0].__name__)(args[0])
+ else:
+ # the decorator was invoked with arguments
+ def set_handler(handler):
+ return self.on(handler.__name__, *args, **kwargs)(handler)
+
+ return set_handler
+
+ def register_namespace(self, namespace_handler):
+ """Register a namespace handler object.
+
+ :param namespace_handler: An instance of a :class:`Namespace`
+ subclass that handles all the event traffic
+ for a namespace.
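+
+ Example usage (a sketch; ``socketio.ClientNamespace`` is the base
+ class for client-side namespace handlers)::
+
+     class ChatNamespace(socketio.ClientNamespace):
+         def on_message(self, data):
+             print('message received:', data)
+
+     sio.register_namespace(ChatNamespace('/chat'))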
+ """
+ if not isinstance(namespace_handler,
+ base_namespace.BaseClientNamespace):
+ raise ValueError('Not a namespace instance')
+ if self.is_asyncio_based() != namespace_handler.is_asyncio_based():
+ raise ValueError('Not a valid namespace class for this client')
+ namespace_handler._set_client(self)
+ self.namespace_handlers[namespace_handler.namespace] = \
+ namespace_handler
+
+ def get_sid(self, namespace=None):
+ """Return the ``sid`` associated with a connection.
+
+ :param namespace: The Socket.IO namespace. If this argument is omitted
+ the handler is associated with the default
+ namespace. Note that unlike previous versions, the
+ current version of the Socket.IO protocol uses
+ different ``sid`` values per namespace.
+
+ This method returns the ``sid`` for the requested namespace as a
+ string.
+ """
+ return self.namespaces.get(namespace or '/')
+
+ def transport(self):
+ """Return the name of the transport used by the client.
+
+ The two possible values returned by this function are ``'polling'``
+ and ``'websocket'``.
+ """
+ return self.eio.transport()
+
+ def _get_event_handler(self, event, namespace, args):
+ # return the appropriate application event handler
+ #
+ # Resolution priority:
+ # - self.handlers[namespace][event]
+ # - self.handlers[namespace]["*"]
+ # - self.handlers["*"][event]
+ # - self.handlers["*"]["*"]
+ handler = None
+ if namespace in self.handlers:
+ if event in self.handlers[namespace]:
+ handler = self.handlers[namespace][event]
+ elif event not in self.reserved_events and \
+ '*' in self.handlers[namespace]:
+ handler = self.handlers[namespace]['*']
+ args = (event, *args)
+ elif '*' in self.handlers:
+ if event in self.handlers['*']:
+ handler = self.handlers['*'][event]
+ args = (namespace, *args)
+ elif event not in self.reserved_events and \
+ '*' in self.handlers['*']:
+ handler = self.handlers['*']['*']
+ args = (event, namespace, *args)
+ return handler, args
+
+ def _get_namespace_handler(self, namespace, args):
+ # Return the appropriate application event handler.
+ #
+ # Resolution priority:
+ # - self.namespace_handlers[namespace]
+ # - self.namespace_handlers["*"]
+ handler = None
+ if namespace in self.namespace_handlers:
+ handler = self.namespace_handlers[namespace]
+ elif '*' in self.namespace_handlers:
+ handler = self.namespace_handlers['*']
+ args = (namespace, *args)
+ return handler, args
+
+ def _generate_ack_id(self, namespace, callback):
+ """Generate a unique identifier for an ACK packet."""
+ namespace = namespace or '/'
+ if namespace not in self.callbacks:
+ self.callbacks[namespace] = {0: itertools.count(1)}
+ id = next(self.callbacks[namespace][0])
+ self.callbacks[namespace][id] = callback
+ return id
+
+ def _handle_eio_connect(self): # pragma: no cover
+ raise NotImplementedError()
+
+ def _handle_eio_message(self, data): # pragma: no cover
+ raise NotImplementedError()
+
+ def _handle_eio_disconnect(self): # pragma: no cover
+ raise NotImplementedError()
+
+ def _engineio_client_class(self): # pragma: no cover
+ raise NotImplementedError()
diff --git a/.venv/Lib/site-packages/socketio/base_manager.py b/.venv/Lib/site-packages/socketio/base_manager.py
new file mode 100644
index 0000000..ca4b0b9
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/base_manager.py
@@ -0,0 +1,162 @@
+import itertools
+import logging
+
+from bidict import bidict, ValueDuplicationError
+
+default_logger = logging.getLogger('socketio')
+
+
+class BaseManager:
+ def __init__(self):
+ self.logger = None
+ self.server = None
+ self.rooms = {} # self.rooms[namespace][room][sio_sid] = eio_sid
+ self.eio_to_sid = {}
+ self.callbacks = {}
+ self.pending_disconnect = {}
+
+ def set_server(self, server):
+ self.server = server
+
+ def initialize(self):
+ """Invoked before the first request is received. Subclasses can add
+ their initialization code here.
+ """
+ pass
+
+ def get_namespaces(self):
+ """Return an iterable with the active namespace names."""
+ return self.rooms.keys()
+
+ def get_participants(self, namespace, room):
+ """Return an iterable with the active participants in a room."""
+ ns = self.rooms.get(namespace, {})
+ if hasattr(room, '__len__') and not isinstance(room, str):
+ participants = ns[room[0]]._fwdm.copy() if room[0] in ns else {}
+ for r in room[1:]:
+ participants.update(ns[r]._fwdm if r in ns else {})
+ else:
+ participants = ns[room]._fwdm.copy() if room in ns else {}
+ for sid, eio_sid in participants.items():
+ yield sid, eio_sid
+
+ def connect(self, eio_sid, namespace):
+ """Register a client connection to a namespace."""
+ sid = self.server.eio.generate_id()
+ try:
+ self.basic_enter_room(sid, namespace, None, eio_sid=eio_sid)
+ except ValueDuplicationError:
+ # already connected
+ return None
+ self.basic_enter_room(sid, namespace, sid, eio_sid=eio_sid)
+ return sid
+
+ def is_connected(self, sid, namespace):
+ if namespace in self.pending_disconnect and \
+ sid in self.pending_disconnect[namespace]:
+ # the client is in the process of being disconnected
+ return False
+ try:
+ return self.rooms[namespace][None][sid] is not None
+ except KeyError:
+ pass
+ return False
+
+ def sid_from_eio_sid(self, eio_sid, namespace):
+ try:
+ return self.rooms[namespace][None]._invm[eio_sid]
+ except KeyError:
+ pass
+
+ def eio_sid_from_sid(self, sid, namespace):
+ if namespace in self.rooms:
+ return self.rooms[namespace][None].get(sid)
+
+ def pre_disconnect(self, sid, namespace):
+ """Put the client in the to-be-disconnected list.
+
+ This allows the client data structures to be present while the
+ disconnect handler is invoked, but still recognize the fact that the
+ client is soon going away.
+ """
+ if namespace not in self.pending_disconnect:
+ self.pending_disconnect[namespace] = []
+ self.pending_disconnect[namespace].append(sid)
+ return self.rooms[namespace][None].get(sid)
+
+ def basic_disconnect(self, sid, namespace, **kwargs):
+ if namespace not in self.rooms:
+ return
+ rooms = []
+ for room_name, room in self.rooms[namespace].copy().items():
+ if sid in room:
+ rooms.append(room_name)
+ for room in rooms:
+ self.basic_leave_room(sid, namespace, room)
+ if sid in self.callbacks:
+ del self.callbacks[sid]
+ if namespace in self.pending_disconnect and \
+ sid in self.pending_disconnect[namespace]:
+ self.pending_disconnect[namespace].remove(sid)
+ if len(self.pending_disconnect[namespace]) == 0:
+ del self.pending_disconnect[namespace]
+
+ def basic_enter_room(self, sid, namespace, room, eio_sid=None):
+ if eio_sid is None and namespace not in self.rooms:
+ raise ValueError('sid is not connected to requested namespace')
+ if namespace not in self.rooms:
+ self.rooms[namespace] = {}
+ if room not in self.rooms[namespace]:
+ self.rooms[namespace][room] = bidict()
+ if eio_sid is None:
+ eio_sid = self.rooms[namespace][None][sid]
+ self.rooms[namespace][room][sid] = eio_sid
+
+ def basic_leave_room(self, sid, namespace, room):
+ try:
+ del self.rooms[namespace][room][sid]
+ if len(self.rooms[namespace][room]) == 0:
+ del self.rooms[namespace][room]
+ if len(self.rooms[namespace]) == 0:
+ del self.rooms[namespace]
+ except KeyError:
+ pass
+
+ def basic_close_room(self, room, namespace):
+ try:
+ for sid, _ in self.get_participants(namespace, room):
+ self.basic_leave_room(sid, namespace, room)
+ except KeyError: # pragma: no cover
+ pass
+
+ def get_rooms(self, sid, namespace):
+ """Return the rooms a client is in."""
+ r = []
+ try:
+ for room_name, room in self.rooms[namespace].items():
+ if room_name is not None and sid in room:
+ r.append(room_name)
+ except KeyError:
+ pass
+ return r
+
+ def _generate_ack_id(self, sid, callback):
+ """Generate a unique identifier for an ACK packet."""
+ if sid not in self.callbacks:
+ self.callbacks[sid] = {0: itertools.count(1)}
+ id = next(self.callbacks[sid][0])
+ self.callbacks[sid][id] = callback
+ return id
+
+ def _get_logger(self):
+ """Get the appropriate logger
+
+ Prevents uninitialized servers in write-only mode from failing.
+ """
+
+ if self.logger:
+ return self.logger
+ elif self.server:
+ return self.server.logger
+ else:
+ return default_logger
diff --git a/.venv/Lib/site-packages/socketio/base_namespace.py b/.venv/Lib/site-packages/socketio/base_namespace.py
new file mode 100644
index 0000000..354f75a
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/base_namespace.py
@@ -0,0 +1,33 @@
+class BaseNamespace(object):
+ def __init__(self, namespace=None):
+ self.namespace = namespace or '/'
+
+ def is_asyncio_based(self):
+ return False
+
+
+class BaseServerNamespace(BaseNamespace):
+ def __init__(self, namespace=None):
+ super().__init__(namespace=namespace)
+ self.server = None
+
+ def _set_server(self, server):
+ self.server = server
+
+ def rooms(self, sid, namespace=None):
+ """Return the rooms a client is in.
+
+ The only difference with the :func:`socketio.Server.rooms` method is
+ that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+ """
+ return self.server.rooms(sid, namespace=namespace or self.namespace)
+
+
+class BaseClientNamespace(BaseNamespace):
+ def __init__(self, namespace=None):
+ super().__init__(namespace=namespace)
+ self.client = None
+
+ def _set_client(self, client):
+ self.client = client
diff --git a/.venv/Lib/site-packages/socketio/base_server.py b/.venv/Lib/site-packages/socketio/base_server.py
new file mode 100644
index 0000000..d5a353b
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/base_server.py
@@ -0,0 +1,263 @@
+import logging
+
+from . import manager
+from . import base_namespace
+from . import packet
+
+default_logger = logging.getLogger('socketio.server')
+
+
+class BaseServer:
+ reserved_events = ['connect', 'disconnect']
+
+ def __init__(self, client_manager=None, logger=False, serializer='default',
+ json=None, async_handlers=True, always_connect=False,
+ namespaces=None, **kwargs):
+ engineio_options = kwargs
+ engineio_logger = engineio_options.pop('engineio_logger', None)
+ if engineio_logger is not None:
+ engineio_options['logger'] = engineio_logger
+ if serializer == 'default':
+ self.packet_class = packet.Packet
+ elif serializer == 'msgpack':
+ from . import msgpack_packet
+ self.packet_class = msgpack_packet.MsgPackPacket
+ else:
+ self.packet_class = serializer
+ if json is not None:
+ self.packet_class.json = json
+ engineio_options['json'] = json
+ engineio_options['async_handlers'] = False
+ self.eio = self._engineio_server_class()(**engineio_options)
+ self.eio.on('connect', self._handle_eio_connect)
+ self.eio.on('message', self._handle_eio_message)
+ self.eio.on('disconnect', self._handle_eio_disconnect)
+
+ self.environ = {}
+ self.handlers = {}
+ self.namespace_handlers = {}
+ self.not_handled = object()
+
+ self._binary_packet = {}
+
+ if not isinstance(logger, bool):
+ self.logger = logger
+ else:
+ self.logger = default_logger
+ if self.logger.level == logging.NOTSET:
+ if logger:
+ self.logger.setLevel(logging.INFO)
+ else:
+ self.logger.setLevel(logging.ERROR)
+ self.logger.addHandler(logging.StreamHandler())
+
+ if client_manager is None:
+ client_manager = manager.Manager()
+ self.manager = client_manager
+ self.manager.set_server(self)
+ self.manager_initialized = False
+
+ self.async_handlers = async_handlers
+ self.always_connect = always_connect
+ self.namespaces = namespaces or ['/']
+
+ self.async_mode = self.eio.async_mode
+
+ def is_asyncio_based(self):
+ return False
+
+ def on(self, event, handler=None, namespace=None):
+ """Register an event handler.
+
+ :param event: The event name. It can be any string. The event names
+ ``'connect'``, ``'message'`` and ``'disconnect'`` are
+ reserved and should not be used. The ``'*'`` event name
+ can be used to define a catch-all event handler.
+ :param handler: The function that should be invoked to handle the
+ event. When this parameter is not given, the method
+ acts as a decorator for the handler function.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the handler is associated with
+ the default namespace. A catch-all namespace can be
+ defined by passing ``'*'`` as the namespace.
+
+ Example usage::
+
+ # as a decorator:
+ @sio.on('connect', namespace='/chat')
+ def connect_handler(sid, environ):
+ print('Connection request')
+ if environ['REMOTE_ADDR'] in blacklisted:
+ return False # reject
+
+ # as a method:
+ def message_handler(sid, msg):
+ print('Received message: ', msg)
+ sio.send(sid, 'response')
+ sio.on('message', namespace='/chat', handler=message_handler)
+
+ The arguments passed to the handler function depend on the event type:
+
+ - The ``'connect'`` event handler receives the ``sid`` (session ID) for
+ the client and the WSGI environment dictionary as arguments.
+ - The ``'disconnect'`` handler receives the ``sid`` for the client as
+ its only argument.
+ - The ``'message'`` handler and handlers for custom event names receive
+ the ``sid`` for the client and the message payload as arguments. Any
+ values returned from a message handler will be passed to the client's
+ acknowledgement callback function if it exists.
+ - A catch-all event handler receives the event name as first argument,
+ followed by any arguments specific to the event.
+ - A catch-all namespace event handler receives the namespace as first
+ argument, followed by any arguments specific to the event.
+ - A combined catch-all namespace and catch-all event handler receives
+ the event name as first argument and the namespace as second
+ argument, followed by any arguments specific to the event.
+ """
+ namespace = namespace or '/'
+
+ def set_handler(handler):
+ if namespace not in self.handlers:
+ self.handlers[namespace] = {}
+ self.handlers[namespace][event] = handler
+ return handler
+
+ if handler is None:
+ return set_handler
+ set_handler(handler)
+
+ def event(self, *args, **kwargs):
+ """Decorator to register an event handler.
+
+ This is a simplified version of the ``on()`` method that takes the
+ event name from the decorated function.
+
+ Example usage::
+
+ @sio.event
+ def my_event(data):
+ print('Received data: ', data)
+
+ The above example is equivalent to::
+
+ @sio.on('my_event')
+ def my_event(data):
+ print('Received data: ', data)
+
+ A custom namespace can be given as an argument to the decorator::
+
+ @sio.event(namespace='/test')
+ def my_event(data):
+ print('Received data: ', data)
+ """
+ if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
+ # the decorator was invoked without arguments
+ # args[0] is the decorated function
+ return self.on(args[0].__name__)(args[0])
+ else:
+ # the decorator was invoked with arguments
+ def set_handler(handler):
+ return self.on(handler.__name__, *args, **kwargs)(handler)
+
+ return set_handler
+
+ def register_namespace(self, namespace_handler):
+ """Register a namespace handler object.
+
+ :param namespace_handler: An instance of a :class:`Namespace`
+ subclass that handles all the event traffic
+ for a namespace.
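+
+ Example usage (a sketch; ``socketio.Namespace`` is the base class
+ for server-side namespace handlers)::
+
+     class ChatNamespace(socketio.Namespace):
+         def on_message(self, sid, data):
+             self.emit('reply', 'ok', to=sid)
+
+     sio.register_namespace(ChatNamespace('/chat'))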
+ """
+ if not isinstance(namespace_handler,
+ base_namespace.BaseServerNamespace):
+ raise ValueError('Not a namespace instance')
+ if self.is_asyncio_based() != namespace_handler.is_asyncio_based():
+ raise ValueError('Not a valid namespace class for this server')
+ namespace_handler._set_server(self)
+ self.namespace_handlers[namespace_handler.namespace] = \
+ namespace_handler
+
+ def rooms(self, sid, namespace=None):
+ """Return the rooms a client is in.
+
+ :param sid: Session ID of the client.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the default namespace is used.
+ """
+ namespace = namespace or '/'
+ return self.manager.get_rooms(sid, namespace)
+
+ def transport(self, sid, namespace=None):
+ """Return the name of the transport used by the client.
+
+ The two possible values returned by this function are ``'polling'``
+ and ``'websocket'``.
+
+ :param sid: The session of the client.
+ :param namespace: The Socket.IO namespace. If this argument is omitted
+ the default namespace is used.
+ """
+ eio_sid = self.manager.eio_sid_from_sid(sid, namespace or '/')
+ return self.eio.transport(eio_sid)
+
+ def get_environ(self, sid, namespace=None):
+ """Return the WSGI environ dictionary for a client.
+
+ :param sid: The session of the client.
+ :param namespace: The Socket.IO namespace. If this argument is omitted
+ the default namespace is used.
+ """
+ eio_sid = self.manager.eio_sid_from_sid(sid, namespace or '/')
+ return self.environ.get(eio_sid)
+
+ def _get_event_handler(self, event, namespace, args):
+ # Return the appropriate application event handler
+ #
+ # Resolution priority:
+ # - self.handlers[namespace][event]
+ # - self.handlers[namespace]["*"]
+ # - self.handlers["*"][event]
+ # - self.handlers["*"]["*"]
+ handler = None
+ if namespace in self.handlers:
+ if event in self.handlers[namespace]:
+ handler = self.handlers[namespace][event]
+ elif event not in self.reserved_events and \
+ '*' in self.handlers[namespace]:
+ handler = self.handlers[namespace]['*']
+ args = (event, *args)
+ if handler is None and '*' in self.handlers:
+ if event in self.handlers['*']:
+ handler = self.handlers['*'][event]
+ args = (namespace, *args)
+ elif event not in self.reserved_events and \
+ '*' in self.handlers['*']:
+ handler = self.handlers['*']['*']
+ args = (event, namespace, *args)
+ return handler, args
+
+ def _get_namespace_handler(self, namespace, args):
+ # Return the appropriate application event handler.
+ #
+ # Resolution priority:
+ # - self.namespace_handlers[namespace]
+ # - self.namespace_handlers["*"]
+ handler = None
+ if namespace in self.namespace_handlers:
+ handler = self.namespace_handlers[namespace]
+ if handler is None and '*' in self.namespace_handlers:
+ handler = self.namespace_handlers['*']
+ args = (namespace, *args)
+ return handler, args
+
+ def _handle_eio_connect(self): # pragma: no cover
+ raise NotImplementedError()
+
+ def _handle_eio_message(self, data): # pragma: no cover
+ raise NotImplementedError()
+
+ def _handle_eio_disconnect(self): # pragma: no cover
+ raise NotImplementedError()
+
+ def _engineio_server_class(self): # pragma: no cover
+ raise NotImplementedError('Must be implemented in subclasses')
diff --git a/.venv/Lib/site-packages/socketio/client.py b/.venv/Lib/site-packages/socketio/client.py
new file mode 100644
index 0000000..d7af407
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/client.py
@@ -0,0 +1,542 @@
+import random
+
+import engineio
+
+from . import base_client
+from . import exceptions
+from . import packet
+
+
+class Client(base_client.BaseClient):
+ """A Socket.IO client.
+
+ This class implements a fully compliant Socket.IO web client with support
+ for websocket and long-polling transports.
+
+ :param reconnection: ``True`` if the client should automatically attempt to
+ reconnect to the server after an interruption, or
+ ``False`` to not reconnect. The default is ``True``.
+ :param reconnection_attempts: How many reconnection attempts to issue
+ before giving up, or 0 for infinite attempts.
+ The default is 0.
+ :param reconnection_delay: How long to wait in seconds before the first
+ reconnection attempt. Each successive attempt
+ doubles this delay.
+ :param reconnection_delay_max: The maximum delay between reconnection
+ attempts.
+ :param randomization_factor: Randomization amount for each delay between
+ reconnection attempts. The default is 0.5,
+ which means that each delay is randomly
+ adjusted by +/- 50%.
+ :param logger: To enable logging set to ``True`` or pass a logger object to
+ use. To disable logging set to ``False``. The default is
+ ``False``. Note that fatal errors are logged even when
+ ``logger`` is ``False``.
+ :param serializer: The serialization method to use when transmitting
+ packets. Valid values are ``'default'``, ``'pickle'``,
+ ``'msgpack'`` and ``'cbor'``. Alternatively, a subclass
+ of the :class:`Packet` class with custom implementations
+ of the ``encode()`` and ``decode()`` methods can be
+ provided. Client and server must use compatible
+ serializers.
+ :param json: An alternative json module to use for encoding and decoding
+ packets. Custom json modules must have ``dumps`` and ``loads``
+ functions that are compatible with the standard library
+ versions.
+ :param handle_sigint: Set to ``True`` to automatically handle disconnection
+ when the process is interrupted, or to ``False`` to
+ leave interrupt handling to the calling application.
+ Interrupt handling can only be enabled when the
+ client instance is created in the main thread.
+
+ The Engine.IO configuration supports the following settings:
+
+ :param request_timeout: A timeout in seconds for requests. The default is
+ 5 seconds.
+ :param http_session: an initialized ``requests.Session`` object to be used
+ when sending requests to the server. Use it if you
+ need to add special client options such as proxy
+ servers, SSL certificates, custom CA bundle, etc.
+ :param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to
+ skip SSL certificate verification, allowing
+ connections to servers with self signed certificates.
+ The default is ``True``.
+ :param websocket_extra_options: Dictionary containing additional keyword
+ arguments passed to
+ ``websocket.create_connection()``.
+ :param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
+ a logger object to use. To disable logging set to
+ ``False``. The default is ``False``. Note that
+ fatal errors are logged even when
+ ``engineio_logger`` is ``False``.
+ """
+ def connect(self, url, headers={}, auth=None, transports=None,
+ namespaces=None, socketio_path='socket.io', wait=True,
+ wait_timeout=1, retry=False):
+ """Connect to a Socket.IO server.
+
+ :param url: The URL of the Socket.IO server. It can include custom
+ query string parameters if required by the server. If a
+ function is provided, the client will invoke it to obtain
+ the URL each time a connection or reconnection is
+ attempted.
+ :param headers: A dictionary with custom headers to send with the
+ connection request. If a function is provided, the
+ client will invoke it to obtain the headers dictionary
+ each time a connection or reconnection is attempted.
+ :param auth: Authentication data passed to the server with the
+ connection request, normally a dictionary with one or
+ more string key/value pairs. If a function is provided,
+ the client will invoke it to obtain the authentication
+ data each time a connection or reconnection is attempted.
+ :param transports: The list of allowed transports. Valid transports
+ are ``'polling'`` and ``'websocket'``. If not
+ given, the polling transport is connected first,
+ then an upgrade to websocket is attempted.
+ :param namespaces: The namespaces to connect as a string or list of
+ strings. If not given, the namespaces that have
+ registered event handlers are connected.
+ :param socketio_path: The endpoint where the Socket.IO server is
+ installed. The default value is appropriate for
+ most cases.
+ :param wait: if set to ``True`` (the default) the call only returns
+ when all the namespaces are connected. If set to
+ ``False``, the call returns as soon as the Engine.IO
+ transport is connected, and the namespaces will connect
+ in the background.
+ :param wait_timeout: How long the client should wait for the
+ connection. The default is 1 second. This
+ argument is only considered when ``wait`` is set
+ to ``True``.
+ :param retry: Apply the reconnection logic if the initial connection
+ attempt fails. The default is ``False``.
+
+ Example usage::
+
+ sio = socketio.Client()
+ sio.connect('http://localhost:5000')
+ """
+ if self.connected:
+ raise exceptions.ConnectionError('Already connected')
+
+ self.connection_url = url
+ self.connection_headers = headers
+ self.connection_auth = auth
+ self.connection_transports = transports
+ self.connection_namespaces = namespaces
+ self.socketio_path = socketio_path
+
+ if namespaces is None:
+ namespaces = list(set(self.handlers.keys()).union(
+ set(self.namespace_handlers.keys())))
+ if '*' in namespaces:
+ namespaces.remove('*')
+ if len(namespaces) == 0:
+ namespaces = ['/']
+ elif isinstance(namespaces, str):
+ namespaces = [namespaces]
+ self.connection_namespaces = namespaces
+ self.namespaces = {}
+ if self._connect_event is None:
+ self._connect_event = self.eio.create_event()
+ else:
+ self._connect_event.clear()
+ real_url = self._get_real_value(self.connection_url)
+ real_headers = self._get_real_value(self.connection_headers)
+ try:
+ self.eio.connect(real_url, headers=real_headers,
+ transports=transports,
+ engineio_path=socketio_path)
+ except engineio.exceptions.ConnectionError as exc:
+ for n in self.connection_namespaces:
+ self._trigger_event(
+ 'connect_error', n,
+ exc.args[1] if len(exc.args) > 1 else exc.args[0])
+ if retry: # pragma: no cover
+ self._handle_reconnect()
+ if self.eio.state == 'connected':
+ return
+ raise exceptions.ConnectionError(exc.args[0]) from None
+
+ if wait:
+ while self._connect_event.wait(timeout=wait_timeout):
+ self._connect_event.clear()
+ if set(self.namespaces) == set(self.connection_namespaces):
+ break
+ if set(self.namespaces) != set(self.connection_namespaces):
+ self.disconnect()
+ raise exceptions.ConnectionError(
+ 'One or more namespaces failed to connect')
+
+ self.connected = True
+
+ def wait(self):
+ """Wait until the connection with the server ends.
+
+ Client applications can use this function to block the main thread
+ during the life of the connection.
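+
+ Example usage (a sketch; assumes a server at the given URL)::
+
+ sio.connect('http://localhost:5000')
+ sio.wait() # returns when the connection ends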
+ """
+ while True:
+ self.eio.wait()
+ self.sleep(1) # give the reconnect task time to start up
+ if not self._reconnect_task:
+ break
+ self._reconnect_task.join()
+ if self.eio.state != 'connected':
+ break
+
+ def emit(self, event, data=None, namespace=None, callback=None):
+ """Emit a custom event to the server.
+
+ :param event: The event name. It can be any string. The event names
+ ``'connect'``, ``'message'`` and ``'disconnect'`` are
+ reserved and should not be used.
+ :param data: The data to send to the server. Data can be of
+ type ``str``, ``bytes``, ``list`` or ``dict``. To send
+ multiple arguments, use a tuple where each element is of
+ one of the types indicated above.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the event is emitted to the
+ default namespace.
+ :param callback: If given, this function will be called to acknowledge
+ the server has received the message. The arguments
+ that will be passed to the function are those provided
+ by the server.
+
+ Note: this method is not thread safe. If multiple threads are emitting
+ at the same time on the same client connection, messages composed of
+ multiple packets may end up being sent in an incorrect sequence. Use
+ standard concurrency solutions (such as a Lock object) to prevent this
+ situation.
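+
+ Example usage (a sketch; assumes a connected client, with the
+ callback invoked when the server acknowledges the event)::
+
+ sio.emit('my_event', {'data': 123},
+ callback=lambda *args: print('ack:', args))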
+ """
+ namespace = namespace or '/'
+ if namespace not in self.namespaces:
+ raise exceptions.BadNamespaceError(
+ namespace + ' is not a connected namespace.')
+ self.logger.info('Emitting event "%s" [%s]', event, namespace)
+ if callback is not None:
+ id = self._generate_ack_id(namespace, callback)
+ else:
+ id = None
+ # tuples are expanded to multiple arguments, everything else is sent
+ # as a single argument
+ if isinstance(data, tuple):
+ data = list(data)
+ elif data is not None:
+ data = [data]
+ else:
+ data = []
+ self._send_packet(self.packet_class(packet.EVENT, namespace=namespace,
+ data=[event] + data, id=id))
+
+ def send(self, data, namespace=None, callback=None):
+ """Send a message to the server.
+
+ This function emits an event with the name ``'message'``. Use
+ :func:`emit` to issue custom event names.
+
+ :param data: The data to send to the server. Data can be of
+ type ``str``, ``bytes``, ``list`` or ``dict``. To send
+ multiple arguments, use a tuple where each element is of
+ one of the types indicated above.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the event is emitted to the
+ default namespace.
+ :param callback: If given, this function will be called to acknowledge
+ the server has received the message. The arguments
+ that will be passed to the function are those provided
+ by the server.
+ """
+ self.emit('message', data=data, namespace=namespace,
+ callback=callback)
+
+ def call(self, event, data=None, namespace=None, timeout=60):
+ """Emit a custom event to the server and wait for the response.
+
+ This method issues an emit with a callback and waits for the callback
+ to be invoked before returning. If the callback isn't invoked before
+ the timeout, then a ``TimeoutError`` exception is raised. If the
+ Socket.IO connection drops during the wait, this method still waits
+ until the specified timeout.
+
+ :param event: The event name. It can be any string. The event names
+ ``'connect'``, ``'message'`` and ``'disconnect'`` are
+ reserved and should not be used.
+ :param data: The data to send to the server. Data can be of
+ type ``str``, ``bytes``, ``list`` or ``dict``. To send
+ multiple arguments, use a tuple where each element is of
+ one of the types indicated above.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the event is emitted to the
+ default namespace.
+ :param timeout: The waiting timeout. If the timeout is reached before
+ the server acknowledges the event, then a
+ ``TimeoutError`` exception is raised.
+
+ Note: this method is not thread safe. If multiple threads are emitting
+ at the same time on the same client connection, messages composed of
+ multiple packets may end up being sent in an incorrect sequence. Use
+ standard concurrency solutions (such as a Lock object) to prevent this
+ situation.
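+
+ Example usage (a sketch; assumes the server acknowledges the event,
+ with ``TimeoutError`` raised if it does not do so within 10
+ seconds)::
+
+ result = sio.call('my_event', {'data': 123}, timeout=10)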
+ """
+ callback_event = self.eio.create_event()
+ callback_args = []
+
+ def event_callback(*args):
+ callback_args.append(args)
+ callback_event.set()
+
+ self.emit(event, data=data, namespace=namespace,
+ callback=event_callback)
+ if not callback_event.wait(timeout=timeout):
+ raise exceptions.TimeoutError()
+ return callback_args[0] if len(callback_args[0]) > 1 \
+ else callback_args[0][0] if len(callback_args[0]) == 1 \
+ else None
+
+ def disconnect(self):
+ """Disconnect from the server."""
+ # here we just request the disconnection
+ # later in _handle_eio_disconnect we invoke the disconnect handler
+ for n in self.namespaces:
+ self._send_packet(self.packet_class(
+ packet.DISCONNECT, namespace=n))
+ self.eio.disconnect(abort=True)
+
+ def shutdown(self):
+ """Stop the client.
+
+ If the client is connected to a server, it is disconnected. If the
+ client is attempting to reconnect to server, the reconnection attempts
+ are stopped. If the client is not connected to a server and is not
+ attempting to reconnect, then this function does nothing.
+ """
+ if self.connected:
+ self.disconnect()
+ elif self._reconnect_task: # pragma: no branch
+ self._reconnect_abort.set()
+ self._reconnect_task.join()
+
+ def start_background_task(self, target, *args, **kwargs):
+ """Start a background task using the appropriate async model.
+
+ This is a utility function that applications can use to start a
+ background task using the method that is compatible with the
+ selected async mode.
+
+ :param target: the target function to execute.
+ :param args: arguments to pass to the function.
+ :param kwargs: keyword arguments to pass to the function.
+
+ This function returns an object that represents the background task,
+ on which the ``join()`` method can be invoked to wait for the task to
+ complete.
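+
+ Example usage (a sketch; ``ping_loop`` stands for a function defined
+ by the application)::
+
+ task = sio.start_background_task(ping_loop)
+ task.join() # wait for the task to finish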
+ """
+ return self.eio.start_background_task(target, *args, **kwargs)
+
+ def sleep(self, seconds=0):
+ """Sleep for the requested amount of time using the appropriate async
+ model.
+
+ This is a utility function that applications can use to put a task to
+ sleep without having to worry about using the correct call for the
+ selected async mode.
+ """
+ return self.eio.sleep(seconds)
+
+ def _get_real_value(self, value):
+ """Return the actual value, for parameters that can also be given as
+ callables."""
+ if not callable(value):
+ return value
+ return value()
+
+ def _send_packet(self, pkt):
+ """Send a Socket.IO packet to the server."""
+ encoded_packet = pkt.encode()
+ if isinstance(encoded_packet, list):
+ for ep in encoded_packet:
+ self.eio.send(ep)
+ else:
+ self.eio.send(encoded_packet)
+
+ def _handle_connect(self, namespace, data):
+ namespace = namespace or '/'
+ if namespace not in self.namespaces:
+ self.logger.info('Namespace {} is connected'.format(namespace))
+ self.namespaces[namespace] = (data or {}).get('sid', self.sid)
+ self._trigger_event('connect', namespace=namespace)
+ self._connect_event.set()
+
+ def _handle_disconnect(self, namespace):
+ if not self.connected:
+ return
+ namespace = namespace or '/'
+ self._trigger_event('disconnect', namespace=namespace)
+ self._trigger_event('__disconnect_final', namespace=namespace)
+ if namespace in self.namespaces:
+ del self.namespaces[namespace]
+ if not self.namespaces:
+ self.connected = False
+ self.eio.disconnect(abort=True)
+
+ def _handle_event(self, namespace, id, data):
+ namespace = namespace or '/'
+ self.logger.info('Received event "%s" [%s]', data[0], namespace)
+ r = self._trigger_event(data[0], namespace, *data[1:])
+ if id is not None:
+ # send ACK packet with the response returned by the handler
+ # tuples are expanded as multiple arguments
+ if r is None:
+ data = []
+ elif isinstance(r, tuple):
+ data = list(r)
+ else:
+ data = [r]
+ self._send_packet(self.packet_class(
+ packet.ACK, namespace=namespace, id=id, data=data))
+
+ def _handle_ack(self, namespace, id, data):
+ namespace = namespace or '/'
+ self.logger.info('Received ack [%s]', namespace)
+ callback = None
+ try:
+ callback = self.callbacks[namespace][id]
+ except KeyError:
+ # if we get an unknown callback we just ignore it
+ self.logger.warning('Unknown callback received, ignoring.')
+ else:
+ del self.callbacks[namespace][id]
+ if callback is not None:
+ callback(*data)
+
+ def _handle_error(self, namespace, data):
+ namespace = namespace or '/'
+ self.logger.info('Connection to namespace {} was rejected'.format(
+ namespace))
+ if data is None:
+ data = tuple()
+ elif not isinstance(data, (tuple, list)):
+ data = (data,)
+ self._trigger_event('connect_error', namespace, *data)
+ self._connect_event.set()
+ if namespace in self.namespaces:
+ del self.namespaces[namespace]
+ if namespace == '/':
+ self.namespaces = {}
+ self.connected = False
+
+ def _trigger_event(self, event, namespace, *args):
+ """Invoke an application event handler."""
+ # first see if we have an explicit handler for the event
+ handler, args = self._get_event_handler(event, namespace, args)
+ if handler:
+ return handler(*args)
+
+ # or else, forward the event to a namespace handler if one exists
+ handler, args = self._get_namespace_handler(namespace, args)
+ if handler:
+ return handler.trigger_event(event, *args)
+
+ def _handle_reconnect(self):
+ if self._reconnect_abort is None: # pragma: no cover
+ self._reconnect_abort = self.eio.create_event()
+ self._reconnect_abort.clear()
+ base_client.reconnecting_clients.append(self)
+ attempt_count = 0
+ current_delay = self.reconnection_delay
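+ # exponential backoff with random jitter: the delay doubles after
+ # each failed attempt, up to reconnection_delay_max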
+ while True:
+ delay = current_delay
+ current_delay *= 2
+ if delay > self.reconnection_delay_max:
+ delay = self.reconnection_delay_max
+ delay += self.randomization_factor * (2 * random.random() - 1)
+ self.logger.info(
+ 'Connection failed, new attempt in {:.02f} seconds'.format(
+ delay))
+ if self._reconnect_abort.wait(delay):
+ self.logger.info('Reconnect task aborted')
+ for n in self.connection_namespaces:
+ self._trigger_event('__disconnect_final', namespace=n)
+ break
+ attempt_count += 1
+ try:
+ self.connect(self.connection_url,
+ headers=self.connection_headers,
+ auth=self.connection_auth,
+ transports=self.connection_transports,
+ namespaces=self.connection_namespaces,
+ socketio_path=self.socketio_path,
+ retry=False)
+ except (exceptions.ConnectionError, ValueError):
+ pass
+ else:
+ self.logger.info('Reconnection successful')
+ self._reconnect_task = None
+ break
+ if self.reconnection_attempts and \
+ attempt_count >= self.reconnection_attempts:
+ self.logger.info(
+ 'Maximum reconnection attempts reached, giving up')
+ for n in self.connection_namespaces:
+ self._trigger_event('__disconnect_final', namespace=n)
+ break
+ base_client.reconnecting_clients.remove(self)
+
+ def _handle_eio_connect(self):
+ """Handle the Engine.IO connection event."""
+ self.logger.info('Engine.IO connection established')
+ self.sid = self.eio.sid
+ real_auth = self._get_real_value(self.connection_auth) or {}
+ for n in self.connection_namespaces:
+ self._send_packet(self.packet_class(
+ packet.CONNECT, data=real_auth, namespace=n))
+
+ def _handle_eio_message(self, data):
+ """Dispatch Engine.IO messages."""
+ if self._binary_packet:
+ pkt = self._binary_packet
+ if pkt.add_attachment(data):
+ self._binary_packet = None
+ if pkt.packet_type == packet.BINARY_EVENT:
+ self._handle_event(pkt.namespace, pkt.id, pkt.data)
+ else:
+ self._handle_ack(pkt.namespace, pkt.id, pkt.data)
+ else:
+ pkt = self.packet_class(encoded_packet=data)
+ if pkt.packet_type == packet.CONNECT:
+ self._handle_connect(pkt.namespace, pkt.data)
+ elif pkt.packet_type == packet.DISCONNECT:
+ self._handle_disconnect(pkt.namespace)
+ elif pkt.packet_type == packet.EVENT:
+ self._handle_event(pkt.namespace, pkt.id, pkt.data)
+ elif pkt.packet_type == packet.ACK:
+ self._handle_ack(pkt.namespace, pkt.id, pkt.data)
+ elif pkt.packet_type == packet.BINARY_EVENT or \
+ pkt.packet_type == packet.BINARY_ACK:
+ self._binary_packet = pkt
+ elif pkt.packet_type == packet.CONNECT_ERROR:
+ self._handle_error(pkt.namespace, pkt.data)
+ else:
+ raise ValueError('Unknown packet type.')
+
+ def _handle_eio_disconnect(self):
+ """Handle the Engine.IO disconnection event."""
+ self.logger.info('Engine.IO connection dropped')
+ will_reconnect = self.reconnection and self.eio.state == 'connected'
+ if self.connected:
+ for n in self.namespaces:
+ self._trigger_event('disconnect', namespace=n)
+ if not will_reconnect:
+ self._trigger_event('__disconnect_final', namespace=n)
+ self.namespaces = {}
+ self.connected = False
+ self.callbacks = {}
+ self._binary_packet = None
+ self.sid = None
+ if will_reconnect:
+ self._reconnect_task = self.start_background_task(
+ self._handle_reconnect)
+
+ def _engineio_client_class(self):
+ return engineio.Client
diff --git a/.venv/Lib/site-packages/socketio/exceptions.py b/.venv/Lib/site-packages/socketio/exceptions.py
new file mode 100644
index 0000000..19d6e39
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/exceptions.py
@@ -0,0 +1,38 @@
+class SocketIOError(Exception):
+ pass
+
+
+class ConnectionError(SocketIOError):
+ pass
+
+
+class ConnectionRefusedError(ConnectionError):
+ """Connection refused exception.
+
+ This exception can be raised from a connect handler when the connection
+ is not accepted. The positional arguments provided with the exception are
+ returned with the error packet to the client.
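+
+ Example usage (a sketch of a server-side connect handler)::
+
+ @sio.event
+ def connect(sid, environ):
+ raise ConnectionRefusedError('authentication failed')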
+ """
+ def __init__(self, *args):
+ if len(args) == 0:
+ self.error_args = {'message': 'Connection rejected by server'}
+ elif len(args) == 1:
+ self.error_args = {'message': str(args[0])}
+ else:
+ self.error_args = {'message': str(args[0])}
+ if len(args) == 2:
+ self.error_args['data'] = args[1]
+ else:
+ self.error_args['data'] = args[1:]
+
+
+class TimeoutError(SocketIOError):
+ pass
+
+
+class BadNamespaceError(SocketIOError):
+ pass
+
+
+class DisconnectedError(SocketIOError):
+ pass
diff --git a/.venv/Lib/site-packages/socketio/kafka_manager.py b/.venv/Lib/site-packages/socketio/kafka_manager.py
new file mode 100644
index 0000000..4d87d46
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/kafka_manager.py
@@ -0,0 +1,66 @@
+import logging
+import pickle
+
+try:
+ import kafka
+except ImportError:
+ kafka = None
+
+from .pubsub_manager import PubSubManager
+
+logger = logging.getLogger('socketio')
+
+
+class KafkaManager(PubSubManager): # pragma: no cover
+ """Kafka based client manager.
+
+ This class implements a Kafka backend for event sharing across multiple
+ processes.
+
+ To use a Kafka backend, initialize the :class:`Server` instance as
+ follows::
+
+ url = 'kafka://hostname:port'
+ server = socketio.Server(client_manager=socketio.KafkaManager(url))
+
+ :param url: The connection URL for the Kafka server. For a default Kafka
+ store running on the same host, use ``kafka://``. For a highly
+ available deployment of Kafka, pass a list with all the
+ connection URLs available in your cluster.
+ :param channel: The channel name (topic) on which the server sends and
+ receives notifications. Must be the same in all the
+ servers.
+ :param write_only: If set to ``True``, only initialize to emit events. The
+ default of ``False`` initializes the class for emitting
+ and receiving.
+ """
+ name = 'kafka'
+
+ def __init__(self, url='kafka://localhost:9092', channel='socketio',
+ write_only=False):
+ if kafka is None:
+ raise RuntimeError('kafka-python package is not installed '
+ '(Run "pip install kafka-python" in your '
+ 'virtualenv).')
+
+ super().__init__(channel=channel, write_only=write_only)
+
+ urls = [url] if isinstance(url, str) else url
+ self.kafka_urls = [url[8:] if url != 'kafka://' else 'localhost:9092'
+ for url in urls]
+ self.producer = kafka.KafkaProducer(bootstrap_servers=self.kafka_urls)
+ self.consumer = kafka.KafkaConsumer(self.channel,
+ bootstrap_servers=self.kafka_urls)
+
+ def _publish(self, data):
+ self.producer.send(self.channel, value=pickle.dumps(data))
+ self.producer.flush()
+
+ def _kafka_listen(self):
+ for message in self.consumer:
+ yield message
+
+ def _listen(self):
+ for message in self._kafka_listen():
+ if message.topic == self.channel:
+ yield pickle.loads(message.value)
diff --git a/.venv/Lib/site-packages/socketio/kombu_manager.py b/.venv/Lib/site-packages/socketio/kombu_manager.py
new file mode 100644
index 0000000..0a63bc2
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/kombu_manager.py
@@ -0,0 +1,134 @@
+import pickle
+import time
+import uuid
+
+try:
+ import kombu
+except ImportError:
+ kombu = None
+
+from .pubsub_manager import PubSubManager
+
+
+class KombuManager(PubSubManager): # pragma: no cover
+ """Client manager that uses kombu for inter-process messaging.
+
+ This class implements a client manager backend for event sharing across
+ multiple processes, using RabbitMQ, Redis or any other messaging mechanism
+ supported by `kombu `_.
+
+ To use a kombu backend, initialize the :class:`Server` instance as
+ follows::
+
+ url = 'amqp://user:password@hostname:port//'
+ server = socketio.Server(client_manager=socketio.KombuManager(url))
+
+ :param url: The connection URL for the backend messaging queue. Example
+ connection URLs are ``'amqp://guest:guest@localhost:5672//'``
+ and ``'redis://localhost:6379/'`` for RabbitMQ and Redis
+ respectively. Consult the `kombu documentation
+ `_ for more on how to construct
+ connection URLs.
+ :param channel: The channel name on which the server sends and receives
+ notifications. Must be the same in all the servers.
+ :param write_only: If set to ``True``, only initialize to emit events. The
+ default of ``False`` initializes the class for emitting
+ and receiving.
+ :param connection_options: additional keyword arguments to be passed to
+ ``kombu.Connection()``.
+ :param exchange_options: additional keyword arguments to be passed to
+ ``kombu.Exchange()``.
+ :param queue_options: additional keyword arguments to be passed to
+ ``kombu.Queue()``.
+ :param producer_options: additional keyword arguments to be passed to
+ ``kombu.Producer()``.
+ """
+ name = 'kombu'
+
+ def __init__(self, url='amqp://guest:guest@localhost:5672//',
+ channel='socketio', write_only=False, logger=None,
+ connection_options=None, exchange_options=None,
+ queue_options=None, producer_options=None):
+ if kombu is None:
+ raise RuntimeError('Kombu package is not installed '
+ '(Run "pip install kombu" in your '
+ 'virtualenv).')
+ super().__init__(channel=channel, write_only=write_only, logger=logger)
+ self.url = url
+ self.connection_options = connection_options or {}
+ self.exchange_options = exchange_options or {}
+ self.queue_options = queue_options or {}
+ self.producer_options = producer_options or {}
+ self.publisher_connection = self._connection()
+
+ def initialize(self):
+ super().initialize()
+
+ monkey_patched = True
+ if self.server.async_mode == 'eventlet':
+ from eventlet.patcher import is_monkey_patched
+ monkey_patched = is_monkey_patched('socket')
+ elif 'gevent' in self.server.async_mode:
+ from gevent.monkey import is_module_patched
+ monkey_patched = is_module_patched('socket')
+ if not monkey_patched:
+ raise RuntimeError(
+ 'Kombu requires a monkey patched socket library to work '
+ 'with ' + self.server.async_mode)
+
+ def _connection(self):
+ return kombu.Connection(self.url, **self.connection_options)
+
+ def _exchange(self):
+ options = {'type': 'fanout', 'durable': False}
+ options.update(self.exchange_options)
+ return kombu.Exchange(self.channel, **options)
+
+ def _queue(self):
+ queue_name = 'flask-socketio.' + str(uuid.uuid4())
+ options = {'durable': False, 'queue_arguments': {'x-expires': 300000}}
+ options.update(self.queue_options)
+ return kombu.Queue(queue_name, self._exchange(), **options)
+
+ def _producer_publish(self, connection):
+ producer = connection.Producer(exchange=self._exchange(),
+ **self.producer_options)
+ return connection.ensure(producer, producer.publish)
+
+ def _publish(self, data):
+ retry = True
+ while True:
+ try:
+ producer_publish = self._producer_publish(
+ self.publisher_connection)
+ producer_publish(pickle.dumps(data))
+ break
+ except (OSError, kombu.exceptions.KombuError):
+ if retry:
+ self._get_logger().error('Cannot publish to rabbitmq... '
+ 'retrying')
+ retry = False
+ else:
+ self._get_logger().error(
+ 'Cannot publish to rabbitmq... giving up')
+ break
+
+ def _listen(self):
+ reader_queue = self._queue()
+ retry_sleep = 1
+ while True:
+ try:
+ with self._connection() as connection:
+ with connection.SimpleQueue(reader_queue) as queue:
+ while True:
+ message = queue.get(block=True)
+ message.ack()
+ yield message.payload
+ retry_sleep = 1
+ except (OSError, kombu.exceptions.KombuError):
+ self._get_logger().error(
+ 'Cannot receive from rabbitmq... '
+ 'retrying in {} secs'.format(retry_sleep))
+ time.sleep(retry_sleep)
+ retry_sleep = min(retry_sleep * 2, 60)
diff --git a/.venv/Lib/site-packages/socketio/manager.py b/.venv/Lib/site-packages/socketio/manager.py
new file mode 100644
index 0000000..813c4af
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/manager.py
@@ -0,0 +1,92 @@
+import logging
+
+from engineio import packet as eio_packet
+from . import base_manager
+from . import packet
+
+default_logger = logging.getLogger('socketio')
+
+
+class Manager(base_manager.BaseManager):
+ """Manage client connections.
+
+ This class keeps track of all the clients and the rooms they are in, to
+ support the broadcasting of messages. The data used by this class is
+ stored in an in-memory structure, making it appropriate only for single-process
+ services. More sophisticated storage backends can be implemented by
+ subclasses.
+ """
+ def can_disconnect(self, sid, namespace):
+ return self.is_connected(sid, namespace)
+
+ def emit(self, event, data, namespace, room=None, skip_sid=None,
+ callback=None, **kwargs):
+ """Emit a message to a single client, a room, or all the clients
+ connected to the namespace."""
+ if namespace not in self.rooms:
+ return
+ if isinstance(data, tuple):
+ # tuples are expanded to multiple arguments, everything else is
+ # sent as a single argument
+ data = list(data)
+ elif data is not None:
+ data = [data]
+ else:
+ data = []
+ if not isinstance(skip_sid, list):
+ skip_sid = [skip_sid]
+ if not callback:
+ # when callbacks aren't used the packets sent to each recipient are
+ # identical, so they can be generated once and reused
+ pkt = self.server.packet_class(
+ packet.EVENT, namespace=namespace, data=[event] + data)
+ encoded_packet = pkt.encode()
+ if not isinstance(encoded_packet, list):
+ encoded_packet = [encoded_packet]
+ eio_pkt = [eio_packet.Packet(eio_packet.MESSAGE, p)
+ for p in encoded_packet]
+ for sid, eio_sid in self.get_participants(namespace, room):
+ if sid not in skip_sid:
+ for p in eio_pkt:
+ self.server._send_eio_packet(eio_sid, p)
+ else:
+ # callbacks are used, so each recipient must be sent a packet that
+ # contains a unique callback id
+ # note that callbacks when addressing a group of clients are
+ # implemented, but not tested or supported
+ for sid, eio_sid in self.get_participants(namespace, room):
+ if sid not in skip_sid: # pragma: no branch
+ id = self._generate_ack_id(sid, callback)
+ pkt = self.server.packet_class(
+ packet.EVENT, namespace=namespace, data=[event] + data,
+ id=id)
+ self.server._send_packet(eio_sid, pkt)
+
+ def disconnect(self, sid, namespace, **kwargs):
+ """Register a client disconnect from a namespace."""
+ return self.basic_disconnect(sid, namespace)
+
+ def enter_room(self, sid, namespace, room, eio_sid=None):
+ """Add a client to a room."""
+ return self.basic_enter_room(sid, namespace, room, eio_sid=eio_sid)
+
+ def leave_room(self, sid, namespace, room):
+ """Remove a client from a room."""
+ return self.basic_leave_room(sid, namespace, room)
+
+ def close_room(self, room, namespace):
+ """Remove all participants from a room."""
+ return self.basic_close_room(room, namespace)
+
+ def trigger_callback(self, sid, id, data):
+ """Invoke an application callback."""
+ callback = None
+ try:
+ callback = self.callbacks[sid][id]
+ except KeyError:
+ # if we get an unknown callback we just ignore it
+ self._get_logger().warning('Unknown callback received, ignoring.')
+ else:
+ del self.callbacks[sid][id]
+ if callback is not None:
+ callback(*data)
diff --git a/.venv/Lib/site-packages/socketio/middleware.py b/.venv/Lib/site-packages/socketio/middleware.py
new file mode 100644
index 0000000..acc8ffd
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/middleware.py
@@ -0,0 +1,40 @@
+import engineio
+
+
+class WSGIApp(engineio.WSGIApp):
+ """WSGI middleware for Socket.IO.
+
+ This middleware dispatches traffic to a Socket.IO application. It can also
+ serve a list of static files to the client, or forward unrelated HTTP
+ traffic to another WSGI application.
+
+ :param socketio_app: The Socket.IO server. Must be an instance of the
+ ``socketio.Server`` class.
+ :param wsgi_app: The WSGI app that receives all other traffic.
+ :param static_files: A dictionary with static file mapping rules. See the
+ documentation for details on this argument.
+ :param socketio_path: The endpoint where the Socket.IO application should
+ be installed. The default value is appropriate for
+ most cases.
+
+ Example usage::
+
+ import socketio
+ import eventlet
+ from . import wsgi_app
+
+ sio = socketio.Server()
+ app = socketio.WSGIApp(sio, wsgi_app)
+ eventlet.wsgi.server(eventlet.listen(('', 8000)), app)
+ """
+ def __init__(self, socketio_app, wsgi_app=None, static_files=None,
+ socketio_path='socket.io'):
+ super().__init__(socketio_app, wsgi_app, static_files=static_files,
+ engineio_path=socketio_path)
+
+
+class Middleware(WSGIApp):
+ """This class has been renamed to WSGIApp and is now deprecated."""
+ def __init__(self, socketio_app, wsgi_app=None,
+ socketio_path='socket.io'):
+ super().__init__(socketio_app, wsgi_app, socketio_path=socketio_path)
diff --git a/.venv/Lib/site-packages/socketio/msgpack_packet.py b/.venv/Lib/site-packages/socketio/msgpack_packet.py
new file mode 100644
index 0000000..2746263
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/msgpack_packet.py
@@ -0,0 +1,18 @@
+import msgpack
+from . import packet
+
+
+class MsgPackPacket(packet.Packet):
+ uses_binary_events = False
+
+ def encode(self):
+ """Encode the packet for transmission."""
+ return msgpack.dumps(self._to_dict())
+
+ def decode(self, encoded_packet):
+ """Decode a transmitted package."""
+ decoded = msgpack.loads(encoded_packet)
+ self.packet_type = decoded['type']
+ self.data = decoded.get('data')
+ self.id = decoded.get('id')
+ self.namespace = decoded['nsp']
diff --git a/.venv/Lib/site-packages/socketio/namespace.py b/.venv/Lib/site-packages/socketio/namespace.py
new file mode 100644
index 0000000..ab4f69f
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/namespace.py
@@ -0,0 +1,198 @@
+from . import base_namespace
+
+
+class Namespace(base_namespace.BaseServerNamespace):
+ """Base class for server-side class-based namespaces.
+
+ A class-based namespace is a class that contains all the event handlers
+ for a Socket.IO namespace. The event handlers are methods of the class
+ with the prefix ``on_``, such as ``on_connect``, ``on_disconnect``,
+ ``on_message``, ``on_json``, and so on.
+
+ :param namespace: The Socket.IO namespace to be used with all the event
+ handlers defined in this class. If this argument is
+ omitted, the default namespace is used.
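+
+ Example usage (a sketch; ``sio`` is an existing ``socketio.Server``)::
+
+ class ChatNamespace(socketio.Namespace):
+ def on_my_event(self, sid, data):
+ self.emit('my_response', data, room=sid)
+
+ sio.register_namespace(ChatNamespace('/chat'))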
+ """
+ def trigger_event(self, event, *args):
+ """Dispatch an event to the proper handler method.
+
+ In the most common usage, this method is not overloaded by subclasses,
+ as it performs the routing of events to methods. However, this
+ method can be overridden if special dispatching rules are needed, or if
+ having a single method that catches all events is desired.
+ """
+ handler_name = 'on_' + event
+ if hasattr(self, handler_name):
+ return getattr(self, handler_name)(*args)
+
+ def emit(self, event, data=None, to=None, room=None, skip_sid=None,
+ namespace=None, callback=None, ignore_queue=False):
+ """Emit a custom event to one or more connected clients.
+
+ The only difference with the :func:`socketio.Server.emit` method is
+ that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+ """
+ return self.server.emit(event, data=data, to=to, room=room,
+ skip_sid=skip_sid,
+ namespace=namespace or self.namespace,
+ callback=callback, ignore_queue=ignore_queue)
+
+ def send(self, data, to=None, room=None, skip_sid=None, namespace=None,
+ callback=None, ignore_queue=False):
+ """Send a message to one or more connected clients.
+
+ The only difference with the :func:`socketio.Server.send` method is
+ that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+ """
+ return self.server.send(data, to=to, room=room, skip_sid=skip_sid,
+ namespace=namespace or self.namespace,
+ callback=callback, ignore_queue=ignore_queue)
+
+ def call(self, event, data=None, to=None, sid=None, namespace=None,
+ timeout=None, ignore_queue=False):
+ """Emit a custom event to a client and wait for the response.
+
+ The only difference with the :func:`socketio.Server.call` method is
+ that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+ """
+ return self.server.call(event, data=data, to=to, sid=sid,
+ namespace=namespace or self.namespace,
+ timeout=timeout, ignore_queue=ignore_queue)
+
+ def enter_room(self, sid, room, namespace=None):
+ """Enter a room.
+
+ The only difference with the :func:`socketio.Server.enter_room` method
+ is that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+ """
+ return self.server.enter_room(sid, room,
+ namespace=namespace or self.namespace)
+
+ def leave_room(self, sid, room, namespace=None):
+ """Leave a room.
+
+ The only difference with the :func:`socketio.Server.leave_room` method
+ is that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+ """
+ return self.server.leave_room(sid, room,
+ namespace=namespace or self.namespace)
+
+ def close_room(self, room, namespace=None):
+ """Close a room.
+
+ The only difference with the :func:`socketio.Server.close_room` method
+ is that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+ """
+ return self.server.close_room(room,
+ namespace=namespace or self.namespace)
+
+ def get_session(self, sid, namespace=None):
+ """Return the user session for a client.
+
+ The only difference with the :func:`socketio.Server.get_session`
+ method is that when the ``namespace`` argument is not given the
+ namespace associated with the class is used.
+ """
+ return self.server.get_session(
+ sid, namespace=namespace or self.namespace)
+
+ def save_session(self, sid, session, namespace=None):
+ """Store the user session for a client.
+
+ The only difference with the :func:`socketio.Server.save_session`
+ method is that when the ``namespace`` argument is not given the
+ namespace associated with the class is used.
+ """
+ return self.server.save_session(
+ sid, session, namespace=namespace or self.namespace)
+
+ def session(self, sid, namespace=None):
+ """Return the user session for a client with context manager syntax.
+
+ The only difference with the :func:`socketio.Server.session` method is
+ that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+ """
+ return self.server.session(sid, namespace=namespace or self.namespace)
+
+ def disconnect(self, sid, namespace=None):
+ """Disconnect a client.
+
+ The only difference with the :func:`socketio.Server.disconnect` method
+ is that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+ """
+ return self.server.disconnect(sid,
+ namespace=namespace or self.namespace)
+
+
+class ClientNamespace(base_namespace.BaseClientNamespace):
+ """Base class for client-side class-based namespaces.
+
+ A class-based namespace is a class that contains all the event handlers
+ for a Socket.IO namespace. The event handlers are methods of the class
+ with the prefix ``on_``, such as ``on_connect``, ``on_disconnect``,
+ ``on_message``, ``on_json``, and so on.
+
+ :param namespace: The Socket.IO namespace to be used with all the event
+ handlers defined in this class. If this argument is
+ omitted, the default namespace is used.
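+
+ Example usage (a sketch; ``sio`` is an existing ``socketio.Client``)::
+
+ class ChatClientNamespace(socketio.ClientNamespace):
+ def on_my_response(self, data):
+ print('received:', data)
+
+ sio.register_namespace(ChatClientNamespace('/chat'))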
+ """
+ def trigger_event(self, event, *args):
+ """Dispatch an event to the proper handler method.
+
+ In the most common usage, this method is not overloaded by subclasses,
+ as it performs the routing of events to methods. However, this
+ method can be overridden if special dispatching rules are needed, or if
+ having a single method that catches all events is desired.
+ """
+ handler_name = 'on_' + event
+ if hasattr(self, handler_name):
+ return getattr(self, handler_name)(*args)
+
+ def emit(self, event, data=None, namespace=None, callback=None):
+ """Emit a custom event to the server.
+
+ The only difference with the :func:`socketio.Client.emit` method is
+ that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+ """
+ return self.client.emit(event, data=data,
+ namespace=namespace or self.namespace,
+ callback=callback)
+
+ def send(self, data, room=None, namespace=None, callback=None):
+ """Send a message to the server.
+
+ The only difference with the :func:`socketio.Client.send` method is
+ that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+ """
+ return self.client.send(data, namespace=namespace or self.namespace,
+ callback=callback)
+
+ def call(self, event, data=None, namespace=None, timeout=None):
+ """Emit a custom event to the server and wait for the response.
+
+ The only difference with the :func:`socketio.Client.call` method is
+ that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+ """
+ return self.client.call(event, data=data,
+ namespace=namespace or self.namespace,
+ timeout=timeout)
+
+ def disconnect(self):
+ """Disconnect from the server.
+
+ The only difference with the :func:`socketio.Client.disconnect` method
+ is that when the ``namespace`` argument is not given the namespace
+ associated with the class is used.
+ """
+ return self.client.disconnect()
diff --git a/.venv/Lib/site-packages/socketio/packet.py b/.venv/Lib/site-packages/socketio/packet.py
new file mode 100644
index 0000000..ec1b364
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/packet.py
@@ -0,0 +1,190 @@
+import functools
+from engineio import json as _json
+
+(CONNECT, DISCONNECT, EVENT, ACK, CONNECT_ERROR, BINARY_EVENT, BINARY_ACK) = \
+ (0, 1, 2, 3, 4, 5, 6)
+packet_names = ['CONNECT', 'DISCONNECT', 'EVENT', 'ACK', 'CONNECT_ERROR',
+ 'BINARY_EVENT', 'BINARY_ACK']
+
+
+class Packet(object):
+ """Socket.IO packet."""
+
+ # the format of the Socket.IO packet is as follows:
+ #
+ # packet type: 1 byte, values 0-6
+ # num_attachments: ASCII encoded, only if num_attachments != 0
+ # '-': only if num_attachments != 0
+ # namespace, followed by a ',': only if namespace != '/'
+ # id: ASCII encoded, only if id is not None
+ # data: JSON dump of data payload
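+ #
+ # for example, an EVENT packet with id 1 on namespace '/chat' carrying
+ # the payload ['my_event', {'x': 1}] encodes to:
+ #
+ # 2/chat,1["my_event",{"x":1}]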
+
+ uses_binary_events = True
+ json = _json
+
+ def __init__(self, packet_type=EVENT, data=None, namespace=None, id=None,
+ binary=None, encoded_packet=None):
+ self.packet_type = packet_type
+ self.data = data
+ self.namespace = namespace
+ self.id = id
+ if self.uses_binary_events and \
+ (binary or (binary is None and self._data_is_binary(
+ self.data))):
+ if self.packet_type == EVENT:
+ self.packet_type = BINARY_EVENT
+ elif self.packet_type == ACK:
+ self.packet_type = BINARY_ACK
+ else:
+ raise ValueError('Packet does not support binary payload.')
+ self.attachment_count = 0
+ self.attachments = []
+ if encoded_packet:
+ self.attachment_count = self.decode(encoded_packet) or 0
+
+ def encode(self):
+ """Encode the packet for transmission.
+
+ If the packet contains binary elements, this function returns a list
+ of packets, where the first is the original packet with placeholders for
+ the binary components and the remaining ones are the binary attachments.
+ """
+ encoded_packet = str(self.packet_type)
+ if self.packet_type == BINARY_EVENT or self.packet_type == BINARY_ACK:
+ data, attachments = self._deconstruct_binary(self.data)
+ encoded_packet += str(len(attachments)) + '-'
+ else:
+ data = self.data
+ attachments = None
+ if self.namespace is not None and self.namespace != '/':
+ encoded_packet += self.namespace + ','
+ if self.id is not None:
+ encoded_packet += str(self.id)
+ if data is not None:
+ encoded_packet += self.json.dumps(data, separators=(',', ':'))
+ if attachments is not None:
+ encoded_packet = [encoded_packet] + attachments
+ return encoded_packet
+
+ def decode(self, encoded_packet):
+ """Decode a transmitted package.
+
+ The return value indicates how many binary attachment packets are
+ necessary to fully decode the packet.
+ """
+ ep = encoded_packet
+ try:
+ self.packet_type = int(ep[0:1])
+ except TypeError:
+ self.packet_type = ep
+ ep = ''
+ self.namespace = None
+ self.data = None
+ ep = ep[1:]
+ dash = ep.find('-')
+ attachment_count = 0
+ if dash > 0 and ep[0:dash].isdigit():
+ if dash > 10:
+ raise ValueError('too many attachments')
+ attachment_count = int(ep[0:dash])
+ ep = ep[dash + 1:]
+ if ep and ep[0:1] == '/':
+ sep = ep.find(',')
+ if sep == -1:
+ self.namespace = ep
+ ep = ''
+ else:
+ self.namespace = ep[0:sep]
+ ep = ep[sep + 1:]
+ q = self.namespace.find('?')
+ if q != -1:
+ self.namespace = self.namespace[0:q]
+ if ep and ep[0].isdigit():
+ i = 1
+ end = len(ep)
+ while i < end:
+ if not ep[i].isdigit() or i >= 100:
+ break
+ i += 1
+ self.id = int(ep[:i])
+ ep = ep[i:]
+ if len(ep) > 0 and ep[0].isdigit():
+ raise ValueError('id field is too long')
+ if ep:
+ self.data = self.json.loads(ep)
+ return attachment_count
+
+ def add_attachment(self, attachment):
+ if self.attachment_count <= len(self.attachments):
+ raise ValueError('Unexpected binary attachment')
+ self.attachments.append(attachment)
+ if self.attachment_count == len(self.attachments):
+ self.reconstruct_binary(self.attachments)
+ return True
+ return False
+
+ def reconstruct_binary(self, attachments):
+ """Reconstruct a decoded packet using the given list of binary
+ attachments.
+ """
+ self.data = self._reconstruct_binary_internal(self.data,
+ self.attachments)
+
+ def _reconstruct_binary_internal(self, data, attachments):
+ if isinstance(data, list):
+ return [self._reconstruct_binary_internal(item, attachments)
+ for item in data]
+ elif isinstance(data, dict):
+ if data.get('_placeholder') and 'num' in data:
+ return attachments[data['num']]
+ else:
+ return {key: self._reconstruct_binary_internal(value,
+ attachments)
+ for key, value in data.items()}
+ else:
+ return data
+
+ def _deconstruct_binary(self, data):
+ """Extract binary components in the packet."""
+ attachments = []
+ data = self._deconstruct_binary_internal(data, attachments)
+ return data, attachments
+
+ def _deconstruct_binary_internal(self, data, attachments):
+ if isinstance(data, bytes):
+ attachments.append(data)
+ return {'_placeholder': True, 'num': len(attachments) - 1}
+ elif isinstance(data, list):
+ return [self._deconstruct_binary_internal(item, attachments)
+ for item in data]
+ elif isinstance(data, dict):
+ return {key: self._deconstruct_binary_internal(value, attachments)
+ for key, value in data.items()}
+ else:
+ return data
+
+ def _data_is_binary(self, data):
+ """Check if the data contains binary components."""
+ if isinstance(data, bytes):
+ return True
+ elif isinstance(data, list):
+ return functools.reduce(
+ lambda a, b: a or b, [self._data_is_binary(item)
+ for item in data], False)
+ elif isinstance(data, dict):
+ return functools.reduce(
+ lambda a, b: a or b, [self._data_is_binary(item)
+ for item in data.values()],
+ False)
+ else:
+ return False
+
+ def _to_dict(self):
+ d = {
+ 'type': self.packet_type,
+ 'data': self.data,
+ 'nsp': self.namespace,
+ }
+ if self.id is not None:
+ d['id'] = self.id
+ return d
diff --git a/.venv/Lib/site-packages/socketio/pubsub_manager.py b/.venv/Lib/site-packages/socketio/pubsub_manager.py
new file mode 100644
index 0000000..5ca7619
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/pubsub_manager.py
@@ -0,0 +1,232 @@
+from functools import partial
+import uuid
+
+from engineio import json
+import pickle
+
+from .manager import Manager
+
+
+class PubSubManager(Manager):
+ """Manage a client list attached to a pub/sub backend.
+
+ This is a base class that enables multiple servers to share the list of
+ clients, with the servers communicating events through a pub/sub backend.
+ The use of a pub/sub backend also allows any client connected to the
+ backend to emit events addressed to Socket.IO clients.
+
+ The actual backends must be implemented by subclasses, this class only
+ provides a pub/sub generic framework.
+
+ :param channel: The channel name on which the server sends and receives
+ notifications.
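+
+ A minimal backend sketch (illustrative only; a real backend would block
+ on an external message queue instead of polling a local list)::
+
+ class ListManager(PubSubManager):
+ messages = []
+
+ def _publish(self, data):
+ self.messages.append(data)
+
+ def _listen(self):
+ while True:
+ if self.messages:
+ yield self.messages.pop(0)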
+ """
+ name = 'pubsub'
+
+ def __init__(self, channel='socketio', write_only=False, logger=None):
+ super().__init__()
+ self.channel = channel
+ self.write_only = write_only
+ self.host_id = uuid.uuid4().hex
+ self.logger = logger
+
+ def initialize(self):
+ super().initialize()
+ if not self.write_only:
+ self.thread = self.server.start_background_task(self._thread)
+ self._get_logger().info(self.name + ' backend initialized.')
+
+ def emit(self, event, data, namespace=None, room=None, skip_sid=None,
+ callback=None, **kwargs):
+ """Emit a message to a single client, a room, or all the clients
+ connected to the namespace.
+
+ This method takes care of propagating the message to all the servers
+ that are connected through the message queue.
+
+ The parameters are the same as in :meth:`.Server.emit`.
+ """
+ if kwargs.get('ignore_queue'):
+ return super().emit(
+ event, data, namespace=namespace, room=room, skip_sid=skip_sid,
+ callback=callback)
+ namespace = namespace or '/'
+ if callback is not None:
+ if self.server is None:
+ raise RuntimeError('Callbacks can only be issued from the '
+ 'context of a server.')
+ if room is None:
+ raise ValueError('Cannot use callback without a room set.')
+ id = self._generate_ack_id(room, callback)
+ callback = (room, namespace, id)
+ else:
+ callback = None
+ message = {'method': 'emit', 'event': event, 'data': data,
+ 'namespace': namespace, 'room': room,
+ 'skip_sid': skip_sid, 'callback': callback,
+ 'host_id': self.host_id}
+ self._handle_emit(message) # handle in this host
+ self._publish(message) # notify other hosts
+
+ def can_disconnect(self, sid, namespace):
+ if self.is_connected(sid, namespace):
+ # client is in this server, so we can disconnect directly
+ return super().can_disconnect(sid, namespace)
+ else:
+ # client is in another server, so we post the request to the queue
+ message = {'method': 'disconnect', 'sid': sid,
+ 'namespace': namespace or '/', 'host_id': self.host_id}
+ self._handle_disconnect(message) # handle in this host
+ self._publish(message) # notify other hosts
+
+ def disconnect(self, sid, namespace=None, **kwargs):
+ if kwargs.get('ignore_queue'):
+ return super().disconnect(sid, namespace=namespace)
+ message = {'method': 'disconnect', 'sid': sid,
+ 'namespace': namespace or '/', 'host_id': self.host_id}
+ self._handle_disconnect(message) # handle in this host
+ self._publish(message) # notify other hosts
+
+ def enter_room(self, sid, namespace, room, eio_sid=None):
+ if self.is_connected(sid, namespace):
+ # client is in this server, so we can add to the room directly
+ return super().enter_room(sid, namespace, room, eio_sid=eio_sid)
+ else:
+ message = {'method': 'enter_room', 'sid': sid, 'room': room,
+ 'namespace': namespace or '/', 'host_id': self.host_id}
+ self._publish(message) # notify other hosts
+
+ def leave_room(self, sid, namespace, room):
+ if self.is_connected(sid, namespace):
+ # client is in this server, so we can remove from the room directly
+ return super().leave_room(sid, namespace, room)
+ else:
+ message = {'method': 'leave_room', 'sid': sid, 'room': room,
+ 'namespace': namespace or '/', 'host_id': self.host_id}
+ self._publish(message) # notify other hosts
+
+ def close_room(self, room, namespace=None):
+ message = {'method': 'close_room', 'room': room,
+ 'namespace': namespace or '/', 'host_id': self.host_id}
+ self._handle_close_room(message) # handle in this host
+ self._publish(message) # notify other hosts
+
+ def _publish(self, data):
+ """Publish a message on the Socket.IO channel.
+
+ This method needs to be implemented by the different subclasses that
+ support pub/sub backends.
+ """
+ raise NotImplementedError('This method must be implemented in a '
+ 'subclass.') # pragma: no cover
+
+ def _listen(self):
+ """Return the next message published on the Socket.IO channel,
+ blocking until a message is available.
+
+ This method needs to be implemented by the different subclasses that
+ support pub/sub backends.
+ """
+ raise NotImplementedError('This method must be implemented in a '
+ 'subclass.') # pragma: no cover
+
+ def _handle_emit(self, message):
+ # Events with callbacks are very tricky to handle across hosts
+ # Here in the receiving end we set up a local callback that preserves
+ # the callback host and id from the sender
+ remote_callback = message.get('callback')
+ remote_host_id = message.get('host_id')
+ if remote_callback is not None and len(remote_callback) == 3:
+ callback = partial(self._return_callback, remote_host_id,
+ *remote_callback)
+ else:
+ callback = None
+ super().emit(message['event'], message['data'],
+ namespace=message.get('namespace'),
+ room=message.get('room'),
+ skip_sid=message.get('skip_sid'), callback=callback)
+
+ def _handle_callback(self, message):
+ if self.host_id == message.get('host_id'):
+ try:
+ sid = message['sid']
+ id = message['id']
+ args = message['args']
+ except KeyError:
+ return
+ self.trigger_callback(sid, id, args)
+
+ def _return_callback(self, host_id, sid, namespace, callback_id, *args):
+ # When an event callback is received, the callback is returned back
+ # to the sender, which is identified by the host_id
+ if host_id == self.host_id:
+ self.trigger_callback(sid, callback_id, args)
+ else:
+ self._publish({'method': 'callback', 'host_id': host_id,
+ 'sid': sid, 'namespace': namespace,
+ 'id': callback_id, 'args': args})
+
+ def _handle_disconnect(self, message):
+ self.server.disconnect(sid=message.get('sid'),
+ namespace=message.get('namespace'),
+ ignore_queue=True)
+
+ def _handle_enter_room(self, message):
+ sid = message.get('sid')
+ namespace = message.get('namespace')
+ if self.is_connected(sid, namespace):
+ super().enter_room(sid, namespace, message.get('room'))
+
+ def _handle_leave_room(self, message):
+ sid = message.get('sid')
+ namespace = message.get('namespace')
+ if self.is_connected(sid, namespace):
+ super().leave_room(sid, namespace, message.get('room'))
+
+ def _handle_close_room(self, message):
+ super().close_room(room=message.get('room'),
+ namespace=message.get('namespace'))
+
+ def _thread(self):
+ while True:
+ try:
+ for message in self._listen():
+ data = None
+ if isinstance(message, dict):
+ data = message
+ else:
+ if isinstance(message, bytes): # pragma: no cover
+ try:
+ data = pickle.loads(message)
+ except Exception:
+ pass
+ if data is None:
+ try:
+ data = json.loads(message)
+ except Exception:
+ pass
+ if data and 'method' in data:
+ self._get_logger().debug('pubsub message: {}'.format(
+ data['method']))
+ try:
+ if data['method'] == 'callback':
+ self._handle_callback(data)
+ elif data.get('host_id') != self.host_id:
+ if data['method'] == 'emit':
+ self._handle_emit(data)
+ elif data['method'] == 'disconnect':
+ self._handle_disconnect(data)
+ elif data['method'] == 'enter_room':
+ self._handle_enter_room(data)
+ elif data['method'] == 'leave_room':
+ self._handle_leave_room(data)
+ elif data['method'] == 'close_room':
+ self._handle_close_room(data)
+ except Exception:
+ self.server.logger.exception(
+ 'Handler error in pubsub listening thread')
+ self.server.logger.error('pubsub listen() exited unexpectedly')
+ break # loop should never exit except in unit tests!
+ except Exception: # pragma: no cover
+ self.server.logger.exception('Unexpected Error in pubsub '
+ 'listening thread')
diff --git a/.venv/Lib/site-packages/socketio/redis_manager.py b/.venv/Lib/site-packages/socketio/redis_manager.py
new file mode 100644
index 0000000..ae9fa29
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/redis_manager.py
@@ -0,0 +1,115 @@
+import logging
+import pickle
+import time
+
+try:
+ import redis
+except ImportError:
+ redis = None
+
+from .pubsub_manager import PubSubManager
+
+logger = logging.getLogger('socketio')
+
+
+class RedisManager(PubSubManager): # pragma: no cover
+ """Redis based client manager.
+
+ This class implements a Redis backend for event sharing across multiple
+ processes. Only kept here as one more example of how to build a custom
+ backend, since the kombu backend is perfectly adequate to support a Redis
+ message queue.
+
+ To use a Redis backend, initialize the :class:`Server` instance as
+ follows::
+
+ url = 'redis://hostname:port/0'
+ server = socketio.Server(client_manager=socketio.RedisManager(url))
+
+ :param url: The connection URL for the Redis server. For a default Redis
+ store running on the same host, use ``redis://``. To use an
+ SSL connection, use ``rediss://``.
+ :param channel: The channel name on which the server sends and receives
+ notifications. Must be the same in all the servers.
+ :param write_only: If set to ``True``, only initialize to emit events. The
+ default of ``False`` initializes the class for emitting
+ and receiving.
+ :param redis_options: additional keyword arguments to be passed to
+ ``Redis.from_url()``.
+ """
+ name = 'redis'
+
+ def __init__(self, url='redis://localhost:6379/0', channel='socketio',
+ write_only=False, logger=None, redis_options=None):
+ if redis is None:
+ raise RuntimeError('Redis package is not installed '
+ '(Run "pip install redis" in your '
+ 'virtualenv).')
+ self.redis_url = url
+ self.redis_options = redis_options or {}
+ self._redis_connect()
+ super().__init__(channel=channel, write_only=write_only, logger=logger)
+
+ def initialize(self):
+ super().initialize()
+
+ monkey_patched = True
+ if self.server.async_mode == 'eventlet':
+ from eventlet.patcher import is_monkey_patched
+ monkey_patched = is_monkey_patched('socket')
+ elif 'gevent' in self.server.async_mode:
+ from gevent.monkey import is_module_patched
+ monkey_patched = is_module_patched('socket')
+ if not monkey_patched:
+ raise RuntimeError(
+ 'Redis requires a monkey patched socket library to work '
+ 'with ' + self.server.async_mode)
+
+ def _redis_connect(self):
+ self.redis = redis.Redis.from_url(self.redis_url,
+ **self.redis_options)
+ self.pubsub = self.redis.pubsub(ignore_subscribe_messages=True)
+
+ def _publish(self, data):
+ retry = True
+ while True:
+ try:
+ if not retry:
+ self._redis_connect()
+ return self.redis.publish(self.channel, pickle.dumps(data))
+ except redis.exceptions.RedisError:
+ if retry:
+ logger.error('Cannot publish to redis... retrying')
+ retry = False
+ else:
+ logger.error('Cannot publish to redis... giving up')
+ break
+
+ def _redis_listen_with_retries(self):
+ retry_sleep = 1
+ connect = False
+ while True:
+ try:
+ if connect:
+ self._redis_connect()
+ self.pubsub.subscribe(self.channel)
+ retry_sleep = 1
+ for message in self.pubsub.listen():
+ yield message
+ except redis.exceptions.RedisError:
+ logger.error('Cannot receive from redis... '
+ 'retrying in {} secs'.format(retry_sleep))
+ connect = True
+ time.sleep(retry_sleep)
+ retry_sleep *= 2
+ if retry_sleep > 60:
+ retry_sleep = 60
+
+ def _listen(self):
+ channel = self.channel.encode('utf-8')
+ self.pubsub.subscribe(self.channel)
+ for message in self._redis_listen_with_retries():
+ if message['channel'] == channel and \
+ message['type'] == 'message' and 'data' in message:
+ yield message['data']
+ self.pubsub.unsubscribe(self.channel)
diff --git a/.venv/Lib/site-packages/socketio/server.py b/.venv/Lib/site-packages/socketio/server.py
new file mode 100644
index 0000000..a40dcd9
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/server.py
@@ -0,0 +1,666 @@
+import logging
+
+import engineio
+
+from . import base_server
+from . import exceptions
+from . import packet
+
+default_logger = logging.getLogger('socketio.server')
+
+
+class Server(base_server.BaseServer):
+ """A Socket.IO server.
+
+ This class implements a fully compliant Socket.IO web server with support
+ for websocket and long-polling transports.
+
+ :param client_manager: The client manager instance that will manage the
+ client list. When this is omitted, the client list
+ is stored in an in-memory structure, so the use of
+ multiple connected servers is not possible.
+ :param logger: To enable logging set to ``True`` or pass a logger object to
+ use. To disable logging set to ``False``. The default is
+ ``False``. Note that fatal errors are logged even when
+ ``logger`` is ``False``.
+ :param serializer: The serialization method to use when transmitting
+ packets. Valid values are ``'default'``, ``'pickle'``,
+ ``'msgpack'`` and ``'cbor'``. Alternatively, a subclass
+ of the :class:`Packet` class with custom implementations
+ of the ``encode()`` and ``decode()`` methods can be
+ provided. Client and server must use compatible
+ serializers.
+ :param json: An alternative json module to use for encoding and decoding
+ packets. Custom json modules must have ``dumps`` and ``loads``
+ functions that are compatible with the standard library
+ versions.
+ :param async_handlers: If set to ``True``, event handlers for a client are
+ executed in separate threads. To run handlers for a
+ client synchronously, set to ``False``. The default
+ is ``True``.
+ :param always_connect: When set to ``False``, new connections are
+ provisory until the connect handler returns
+ something other than ``False``, at which point they
+ are accepted. When set to ``True``, connections are
+ immediately accepted, and then if the connect
+ handler returns ``False`` a disconnect is issued.
+ Set to ``True`` if you need to emit events from the
+ connect handler and your client is confused when it
+ receives events before the connection acceptance.
+ In any other case use the default of ``False``.
+ :param namespaces: a list of namespaces that are accepted, in addition to
+ any namespaces for which handlers have been defined. The
+ default is `['/']`, which always accepts connections to
+ the default namespace. Set to `'*'` to accept all
+ namespaces.
+ :param kwargs: Connection parameters for the underlying Engine.IO server.
+
+ The Engine.IO configuration supports the following settings:
+
+ :param async_mode: The asynchronous model to use. See the Deployment
+ section in the documentation for a description of the
+ available options. Valid async modes are
+ ``'threading'``, ``'eventlet'``, ``'gevent'`` and
+ ``'gevent_uwsgi'``. If this argument is not given,
+ ``'eventlet'`` is tried first, then ``'gevent_uwsgi'``,
+ then ``'gevent'``, and finally ``'threading'``.
+ The first async mode that has all its dependencies
+ installed is the one that is chosen.
+ :param ping_interval: The interval in seconds at which the server pings
+ the client. The default is 25 seconds. For advanced
+ control, a two element tuple can be given, where
+ the first number is the ping interval and the second
+ is a grace period added by the server.
+ :param ping_timeout: The time in seconds that the client waits for the
+ server to respond before disconnecting. The default
+ is 20 seconds.
+ :param max_http_buffer_size: The maximum size that is accepted for incoming
+ messages. The default is 1,000,000 bytes. In
+ spite of its name, the value set in this
+ argument is enforced for HTTP long-polling and
+ WebSocket connections.
+ :param allow_upgrades: Whether to allow transport upgrades or not. The
+ default is ``True``.
+ :param http_compression: Whether to compress packets when using the
+ polling transport. The default is ``True``.
+ :param compression_threshold: Only compress messages when their byte size
+ is greater than this value. The default is
+ 1024 bytes.
+ :param cookie: If set to a string, it is the name of the HTTP cookie the
+ server sends back to the client containing the client
+ session id. If set to a dictionary, the ``'name'`` key
+ contains the cookie name and other keys define cookie
+ attributes, where the value of each attribute can be a
+ string, a callable with no arguments, or a boolean. If set
+ to ``None`` (the default), a cookie is not sent to the
+ client.
+ :param cors_allowed_origins: Origin or list of origins that are allowed to
+ connect to this server. Only the same origin
+ is allowed by default. Set this argument to
+ ``'*'`` to allow all origins, or to ``[]`` to
+ disable CORS handling.
+ :param cors_credentials: Whether credentials (cookies, authentication) are
+ allowed in requests to this server. The default is
+ ``True``.
+ :param monitor_clients: If set to ``True``, a background task will ensure
+ inactive clients are closed. Set to ``False`` to
+ disable the monitoring task (not recommended). The
+ default is ``True``.
+ :param transports: The list of allowed transports. Valid transports
+ are ``'polling'`` and ``'websocket'``. Defaults to
+ ``['polling', 'websocket']``.
+ :param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
+ a logger object to use. To disable logging set to
+ ``False``. The default is ``False``. Note that
+ fatal errors are logged even when
+ ``engineio_logger`` is ``False``.
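+
+ A minimal usage sketch (the async mode and the WSGI wiring shown here
+ are illustrative; adjust them to your deployment)::
+
+     import socketio
+
+     sio = socketio.Server(async_mode='threading')
+     app = socketio.WSGIApp(sio)
+
+     @sio.event
+     def connect(sid, environ):
+         print('client connected:', sid)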
+ """
+ def emit(self, event, data=None, to=None, room=None, skip_sid=None,
+ namespace=None, callback=None, ignore_queue=False):
+ """Emit a custom event to one or more connected clients.
+
+ :param event: The event name. It can be any string. The event names
+ ``'connect'``, ``'message'`` and ``'disconnect'`` are
+ reserved and should not be used.
+ :param data: The data to send to the client or clients. Data can be of
+ type ``str``, ``bytes``, ``list`` or ``dict``. To send
+ multiple arguments, use a tuple where each element is of
+ one of the types indicated above.
+ :param to: The recipient of the message. This can be set to the
+ session ID of a client to address only that client, to any
+ custom room created by the application to address all
+ the clients in that room, or to a list of custom room
+ names. If this argument is omitted the event is broadcasted
+ to all connected clients.
+ :param room: Alias for the ``to`` parameter.
+ :param skip_sid: The session ID of a client to skip when broadcasting
+ to a room or to all clients. This can be used to
+ prevent a message from being sent to the sender. To
+ skip multiple sids, pass a list.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the event is emitted to the
+ default namespace.
+ :param callback: If given, this function will be called to acknowledge
+ the client has received the message. The arguments
+ that will be passed to the function are those provided
+ by the client. Callback functions can only be used
+ when addressing an individual client.
+ :param ignore_queue: Only used when a message queue is configured. If
+ set to ``True``, the event is emitted to the
+ clients directly, without going through the queue.
+ This is more efficient, but only works when a
+ single server process is used. It is recommended
+ to always leave this parameter with its default
+ value of ``False``.
+
+ Note: this method is not thread safe. If multiple threads are emitting
+ at the same time to the same client, then messages composed of
+ multiple packets may end up being sent in an incorrect sequence. Use
+ standard concurrency solutions (such as a Lock object) to prevent this
+ situation.
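+
+ A short sketch (the event and room names are illustrative)::
+
+     @sio.event
+     def chat_message(sid, data):
+         # rebroadcast to the 'chat' room, excluding the sender
+         sio.emit('chat_message', data, to='chat', skip_sid=sid)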
+ """
+ namespace = namespace or '/'
+ room = to or room
+ self.logger.info('emitting event "%s" to %s [%s]', event,
+ room or 'all', namespace)
+ self.manager.emit(event, data, namespace, room=room,
+ skip_sid=skip_sid, callback=callback,
+ ignore_queue=ignore_queue)
+
+ def send(self, data, to=None, room=None, skip_sid=None, namespace=None,
+ callback=None, ignore_queue=False):
+ """Send a message to one or more connected clients.
+
+ This function emits an event with the name ``'message'``. Use
+ :func:`emit` to issue custom event names.
+
+ :param data: The data to send to the client or clients. Data can be of
+ type ``str``, ``bytes``, ``list`` or ``dict``. To send
+ multiple arguments, use a tuple where each element is of
+ one of the types indicated above.
+ :param to: The recipient of the message. This can be set to the
+ session ID of a client to address only that client, to any
+ custom room created by the application to address all
+ the clients in that room, or to a list of custom room
+ names. If this argument is omitted the event is broadcasted
+ to all connected clients.
+ :param room: Alias for the ``to`` parameter.
+ :param skip_sid: The session ID of a client to skip when broadcasting
+ to a room or to all clients. This can be used to
+ prevent a message from being sent to the sender. To
+ skip multiple sids, pass a list.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the event is emitted to the
+ default namespace.
+ :param callback: If given, this function will be called to acknowledge
+ the client has received the message. The arguments
+ that will be passed to the function are those provided
+ by the client. Callback functions can only be used
+ when addressing an individual client.
+ :param ignore_queue: Only used when a message queue is configured. If
+ set to ``True``, the event is emitted to the
+ clients directly, without going through the queue.
+ This is more efficient, but only works when a
+ single server process is used. It is recommended
+ to always leave this parameter with its default
+ value of ``False``.
+ """
+ self.emit('message', data=data, to=to, room=room, skip_sid=skip_sid,
+ namespace=namespace, callback=callback,
+ ignore_queue=ignore_queue)
+
+ def call(self, event, data=None, to=None, sid=None, namespace=None,
+ timeout=60, ignore_queue=False):
+ """Emit a custom event to a client and wait for the response.
+
+ This method issues an emit with a callback and waits for the callback
+ to be invoked before returning. If the callback isn't invoked before
+ the timeout, then a ``TimeoutError`` exception is raised. If the
+ Socket.IO connection drops during the wait, this method still waits
+ until the specified timeout.
+
+ :param event: The event name. It can be any string. The event names
+ ``'connect'``, ``'message'`` and ``'disconnect'`` are
+ reserved and should not be used.
+ :param data: The data to send to the client or clients. Data can be of
+ type ``str``, ``bytes``, ``list`` or ``dict``. To send
+ multiple arguments, use a tuple where each element is of
+ one of the types indicated above.
+ :param to: The session ID of the recipient client.
+ :param sid: Alias for the ``to`` parameter.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the event is emitted to the
+ default namespace.
+ :param timeout: The waiting timeout. If the timeout is reached before
+ the client acknowledges the event, then a
+ ``TimeoutError`` exception is raised.
+ :param ignore_queue: Only used when a message queue is configured. If
+ set to ``True``, the event is emitted to the
+ client directly, without going through the queue.
+ This is more efficient, but only works when a
+ single server process is used. It is recommended
+ to always leave this parameter with its default
+ value of ``False``.
+
+ Note: this method is not thread safe. If multiple threads are emitting
+ at the same time to the same client, then messages composed of
+ multiple packets may end up being sent in an incorrect sequence. Use
+ standard concurrency solutions (such as a Lock object) to prevent this
+ situation.
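+
+ A short sketch (``sid`` is the target client's session id; the event
+ name and timeout are illustrative)::
+
+     try:
+         status = sio.call('get_status', to=sid, timeout=10)
+     except socketio.exceptions.TimeoutError:
+         status = None  # the client did not acknowledge in time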
+ """
+ if to is None and sid is None:
+ raise ValueError('Cannot use call() to broadcast.')
+ if not self.async_handlers:
+ raise RuntimeError(
+ 'Cannot use call() when async_handlers is False.')
+ callback_event = self.eio.create_event()
+ callback_args = []
+
+ def event_callback(*args):
+ callback_args.append(args)
+ callback_event.set()
+
+ self.emit(event, data=data, room=to or sid, namespace=namespace,
+ callback=event_callback, ignore_queue=ignore_queue)
+ if not callback_event.wait(timeout=timeout):
+ raise exceptions.TimeoutError()
+ return callback_args[0] if len(callback_args[0]) > 1 \
+ else callback_args[0][0] if len(callback_args[0]) == 1 \
+ else None
+
+ def enter_room(self, sid, room, namespace=None):
+ """Enter a room.
+
+ This function adds the client to a room. The :func:`emit` and
+ :func:`send` functions can optionally broadcast events to all the
+ clients in a room.
+
+ :param sid: Session ID of the client.
+ :param room: Room name. If the room does not exist it is created.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the default namespace is used.
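+
+ A short sketch (the room and event names are illustrative)::
+
+     @sio.event
+     def join(sid, data):
+         sio.enter_room(sid, 'chat')
+         sio.emit('user_joined', {'sid': sid}, to='chat')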
+ """
+ namespace = namespace or '/'
+ self.logger.info('%s is entering room %s [%s]', sid, room, namespace)
+ self.manager.enter_room(sid, namespace, room)
+
+ def leave_room(self, sid, room, namespace=None):
+ """Leave a room.
+
+ This function removes the client from a room.
+
+ :param sid: Session ID of the client.
+ :param room: Room name.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the default namespace is used.
+ """
+ namespace = namespace or '/'
+ self.logger.info('%s is leaving room %s [%s]', sid, room, namespace)
+ self.manager.leave_room(sid, namespace, room)
+
+ def close_room(self, room, namespace=None):
+ """Close a room.
+
+ This function removes all the clients from the given room.
+
+ :param room: Room name.
+ :param namespace: The Socket.IO namespace for the event. If this
+ argument is omitted the default namespace is used.
+ """
+ namespace = namespace or '/'
+ self.logger.info('room %s is closing [%s]', room, namespace)
+ self.manager.close_room(room, namespace)
+
+ def get_session(self, sid, namespace=None):
+ """Return the user session for a client.
+
+ :param sid: The session id of the client.
+ :param namespace: The Socket.IO namespace. If this argument is omitted
+ the default namespace is used.
+
+ The return value is a dictionary. Modifications made to this
+ dictionary are not guaranteed to be preserved unless
+ ``save_session()`` is called, or when the ``session`` context manager
+ is used.
+ """
+ namespace = namespace or '/'
+ eio_sid = self.manager.eio_sid_from_sid(sid, namespace)
+ eio_session = self.eio.get_session(eio_sid)
+ return eio_session.setdefault(namespace, {})
+
+ def save_session(self, sid, session, namespace=None):
+ """Store the user session for a client.
+
+ :param sid: The session id of the client.
+ :param session: The session dictionary.
+ :param namespace: The Socket.IO namespace. If this argument is omitted
+ the default namespace is used.
+ """
+ namespace = namespace or '/'
+ eio_sid = self.manager.eio_sid_from_sid(sid, namespace)
+ eio_session = self.eio.get_session(eio_sid)
+ eio_session[namespace] = session
+
+ def session(self, sid, namespace=None):
+ """Return the user session for a client with context manager syntax.
+
+ :param sid: The session id of the client.
+
+ This is a context manager that returns the user session dictionary for
+ the client. Any changes that are made to this dictionary inside the
+ context manager block are saved back to the session. Example usage::
+
+ @sio.on('connect')
+ def on_connect(sid, environ):
+ username = authenticate_user(environ)
+ if not username:
+ return False
+ with sio.session(sid) as session:
+ session['username'] = username
+
+ @sio.on('message')
+ def on_message(sid, msg):
+ with sio.session(sid) as session:
+ print('received message from ', session['username'])
+ """
+ class _session_context_manager(object):
+ def __init__(self, server, sid, namespace):
+ self.server = server
+ self.sid = sid
+ self.namespace = namespace
+ self.session = None
+
+ def __enter__(self):
+ self.session = self.server.get_session(self.sid,
+ namespace=self.namespace)
+ return self.session
+
+ def __exit__(self, *args):
+ self.server.save_session(self.sid, self.session,
+ namespace=self.namespace)
+
+ return _session_context_manager(self, sid, namespace)
+
+ def disconnect(self, sid, namespace=None, ignore_queue=False):
+ """Disconnect a client.
+
+ :param sid: Session ID of the client.
+ :param namespace: The Socket.IO namespace to disconnect. If this
+ argument is omitted the default namespace is used.
+ :param ignore_queue: Only used when a message queue is configured. If
+ set to ``True``, the disconnect is processed
+ locally, without broadcasting on the queue. It is
+ recommended to always leave this parameter with
+ its default value of ``False``.
+ """
+ namespace = namespace or '/'
+ if ignore_queue:
+ delete_it = self.manager.is_connected(sid, namespace)
+ else:
+ delete_it = self.manager.can_disconnect(sid, namespace)
+ if delete_it:
+ self.logger.info('Disconnecting %s [%s]', sid, namespace)
+ eio_sid = self.manager.pre_disconnect(sid, namespace=namespace)
+ self._send_packet(eio_sid, self.packet_class(
+ packet.DISCONNECT, namespace=namespace))
+ self._trigger_event('disconnect', namespace, sid)
+ self.manager.disconnect(sid, namespace=namespace,
+ ignore_queue=True)
+
+ def shutdown(self):
+ """Stop Socket.IO background tasks.
+
+ This method stops all background activity initiated by the Socket.IO
+ server. It must be called before shutting down the web server.
+ """
+ self.logger.info('Socket.IO is shutting down')
+ self.eio.shutdown()
+
+ def handle_request(self, environ, start_response):
+ """Handle an HTTP request from the client.
+
+ This is the entry point of the Socket.IO application, using the same
+ interface as a WSGI application. For the typical usage, this function
+ is invoked by the :class:`Middleware` instance, but it can be invoked
+ directly when the middleware is not used.
+
+ :param environ: The WSGI environment.
+ :param start_response: The WSGI ``start_response`` function.
+
+ This function returns the HTTP response body to deliver to the client
+ as a byte sequence.
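+
+ A minimal sketch of direct invocation from a plain WSGI callable (the
+ middleware normally does this for you)::
+
+     def wsgi_app(environ, start_response):
+         return sio.handle_request(environ, start_response)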
+ """
+ return self.eio.handle_request(environ, start_response)
+
+ def start_background_task(self, target, *args, **kwargs):
+ """Start a background task using the appropriate async model.
+
+ This is a utility function that applications can use to start a
+ background task using the method that is compatible with the
+ selected async mode.
+
+ :param target: the target function to execute.
+ :param args: arguments to pass to the function.
+ :param kwargs: keyword arguments to pass to the function.
+
+ This function returns an object that represents the background task,
+ on which the ``join()`` method can be invoked to wait for the task to
+ complete.
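+
+ A short sketch (the task body is illustrative)::
+
+     def heartbeat():
+         while True:
+             sio.sleep(25)
+             sio.emit('heartbeat')
+
+     task = sio.start_background_task(heartbeat)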
+ """
+ return self.eio.start_background_task(target, *args, **kwargs)
+
+ def sleep(self, seconds=0):
+ """Sleep for the requested amount of time using the appropriate async
+ model.
+
+ This is a utility function that applications can use to put a task to
+ sleep without having to worry about using the correct call for the
+ selected async mode.
+ """
+ return self.eio.sleep(seconds)
+
+ def instrument(self, auth=None, mode='development', read_only=False,
+ server_id=None, namespace='/admin',
+ server_stats_interval=2):
+ """Instrument the Socket.IO server for monitoring with the `Socket.IO
+ Admin UI <https://socket.io/docs/v4/admin-ui/>`_.
+
+ :param auth: Authentication credentials for Admin UI access. Set to a
+ dictionary with the expected login (usually ``username``
+ and ``password``) or a list of dictionaries if more than
+ one set of credentials need to be available. For more
+ complex authentication methods, set to a callable that
+ receives the authentication dictionary as an argument and
+ returns ``True`` if the user is allowed or ``False``
+ otherwise. To disable authentication, set this argument to
+ ``False`` (not recommended, never do this on a production
+ server).
+ :param mode: The reporting mode. The default is ``'development'``,
+ which is best used while debugging, as it may have a
+ significant performance effect. Set to ``'production'`` to
+ reduce the amount of information that is reported to the
+ admin UI.
+ :param read_only: If set to ``True``, the admin interface will be
+ read-only, with no option to modify room assignments
+ or disconnect clients. The default is ``False``.
+ :param server_id: The server name to use for this server. If this
+ argument is omitted, the server generates its own
+ name.
+ :param namespace: The Socket.IO namespace to use for the admin
+ interface. The default is ``/admin``.
+ :param server_stats_interval: The interval in seconds at which the
+ server emits a summary of its stats to all
+ connected admins.
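+
+ A short sketch (the credentials shown are placeholders)::
+
+     sio.instrument(auth={'username': 'admin', 'password': 'changeme'})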
+ """
+ from .admin import InstrumentedServer
+ return InstrumentedServer(
+ self, auth=auth, mode=mode, read_only=read_only,
+ server_id=server_id, namespace=namespace,
+ server_stats_interval=server_stats_interval)
+
+ def _send_packet(self, eio_sid, pkt):
+ """Send a Socket.IO packet to a client."""
+ encoded_packet = pkt.encode()
+ if isinstance(encoded_packet, list):
+ for ep in encoded_packet:
+ self.eio.send(eio_sid, ep)
+ else:
+ self.eio.send(eio_sid, encoded_packet)
+
+ def _send_eio_packet(self, eio_sid, eio_pkt):
+ """Send a raw Engine.IO packet to a client."""
+ self.eio.send_packet(eio_sid, eio_pkt)
+
+ def _handle_connect(self, eio_sid, namespace, data):
+ """Handle a client connection request."""
+ namespace = namespace or '/'
+ sid = None
+ if namespace in self.handlers or namespace in self.namespace_handlers \
+ or self.namespaces == '*' or namespace in self.namespaces:
+ sid = self.manager.connect(eio_sid, namespace)
+ if sid is None:
+ self._send_packet(eio_sid, self.packet_class(
+ packet.CONNECT_ERROR, data='Unable to connect',
+ namespace=namespace))
+ return
+
+ if self.always_connect:
+ self._send_packet(eio_sid, self.packet_class(
+ packet.CONNECT, {'sid': sid}, namespace=namespace))
+ fail_reason = exceptions.ConnectionRefusedError().error_args
+ try:
+ if data:
+ success = self._trigger_event(
+ 'connect', namespace, sid, self.environ[eio_sid], data)
+ else:
+ try:
+ success = self._trigger_event(
+ 'connect', namespace, sid, self.environ[eio_sid])
+ except TypeError:
+ # the handler expects an auth argument; retry passing None
+ success = self._trigger_event(
+ 'connect', namespace, sid, self.environ[eio_sid], None)
+ except exceptions.ConnectionRefusedError as exc:
+ fail_reason = exc.error_args
+ success = False
+
+ if success is False:
+ if self.always_connect:
+ self.manager.pre_disconnect(sid, namespace)
+ self._send_packet(eio_sid, self.packet_class(
+ packet.DISCONNECT, data=fail_reason, namespace=namespace))
+ else:
+ self._send_packet(eio_sid, self.packet_class(
+ packet.CONNECT_ERROR, data=fail_reason,
+ namespace=namespace))
+ self.manager.disconnect(sid, namespace, ignore_queue=True)
+ elif not self.always_connect:
+ self._send_packet(eio_sid, self.packet_class(
+ packet.CONNECT, {'sid': sid}, namespace=namespace))
+
+ def _handle_disconnect(self, eio_sid, namespace):
+ """Handle a client disconnect."""
+ namespace = namespace or '/'
+ sid = self.manager.sid_from_eio_sid(eio_sid, namespace)
+ if not self.manager.is_connected(sid, namespace): # pragma: no cover
+ return
+ self.manager.pre_disconnect(sid, namespace=namespace)
+ self._trigger_event('disconnect', namespace, sid)
+ self.manager.disconnect(sid, namespace, ignore_queue=True)
+
+ def _handle_event(self, eio_sid, namespace, id, data):
+ """Handle an incoming client event."""
+ namespace = namespace or '/'
+ sid = self.manager.sid_from_eio_sid(eio_sid, namespace)
+ self.logger.info('received event "%s" from %s [%s]', data[0], sid,
+ namespace)
+ if not self.manager.is_connected(sid, namespace):
+ self.logger.warning('%s is not connected to namespace %s',
+ sid, namespace)
+ return
+ if self.async_handlers:
+ self.start_background_task(self._handle_event_internal, self, sid,
+ eio_sid, data, namespace, id)
+ else:
+ self._handle_event_internal(self, sid, eio_sid, data, namespace,
+ id)
+
+ def _handle_event_internal(self, server, sid, eio_sid, data, namespace,
+ id):
+ r = server._trigger_event(data[0], namespace, sid, *data[1:])
+ if r != self.not_handled and id is not None:
+ # send ACK packet with the response returned by the handler
+ # tuples are expanded as multiple arguments
+ if r is None:
+ data = []
+ elif isinstance(r, tuple):
+ data = list(r)
+ else:
+ data = [r]
+ server._send_packet(eio_sid, self.packet_class(
+ packet.ACK, namespace=namespace, id=id, data=data))
+
+ def _handle_ack(self, eio_sid, namespace, id, data):
+ """Handle ACK packets from the client."""
+ namespace = namespace or '/'
+ sid = self.manager.sid_from_eio_sid(eio_sid, namespace)
+ self.logger.info('received ack from %s [%s]', sid, namespace)
+ self.manager.trigger_callback(sid, id, data)
+
+ def _trigger_event(self, event, namespace, *args):
+ """Invoke an application event handler."""
+ # first see if we have an explicit handler for the event
+ handler, args = self._get_event_handler(event, namespace, args)
+ if handler:
+ return handler(*args)
+ # or else, forward the event to a namespace handler if one exists
+ handler, args = self._get_namespace_handler(namespace, args)
+ if handler:
+ return handler.trigger_event(event, *args)
+ else:
+ return self.not_handled
+
+ def _handle_eio_connect(self, eio_sid, environ):
+ """Handle the Engine.IO connection event."""
+ if not self.manager_initialized:
+ self.manager_initialized = True
+ self.manager.initialize()
+ self.environ[eio_sid] = environ
+
+ def _handle_eio_message(self, eio_sid, data):
+ """Dispatch Engine.IO messages."""
+ # a previously received packet is waiting for binary attachments
+ if eio_sid in self._binary_packet:
+ pkt = self._binary_packet[eio_sid]
+ if pkt.add_attachment(data):
+ del self._binary_packet[eio_sid]
+ if pkt.packet_type == packet.BINARY_EVENT:
+ self._handle_event(eio_sid, pkt.namespace, pkt.id,
+ pkt.data)
+ else:
+ self._handle_ack(eio_sid, pkt.namespace, pkt.id, pkt.data)
+ else:
+ pkt = self.packet_class(encoded_packet=data)
+ if pkt.packet_type == packet.CONNECT:
+ self._handle_connect(eio_sid, pkt.namespace, pkt.data)
+ elif pkt.packet_type == packet.DISCONNECT:
+ self._handle_disconnect(eio_sid, pkt.namespace)
+ elif pkt.packet_type == packet.EVENT:
+ self._handle_event(eio_sid, pkt.namespace, pkt.id, pkt.data)
+ elif pkt.packet_type == packet.ACK:
+ self._handle_ack(eio_sid, pkt.namespace, pkt.id, pkt.data)
+ elif pkt.packet_type == packet.BINARY_EVENT or \
+ pkt.packet_type == packet.BINARY_ACK:
+ self._binary_packet[eio_sid] = pkt
+ elif pkt.packet_type == packet.CONNECT_ERROR:
+ raise ValueError('Unexpected CONNECT_ERROR packet.')
+ else:
+ raise ValueError('Unknown packet type.')
+
+ def _handle_eio_disconnect(self, eio_sid):
+ """Handle Engine.IO disconnect event."""
+ for n in list(self.manager.get_namespaces()):
+ self._handle_disconnect(eio_sid, n)
+ if eio_sid in self.environ:
+ del self.environ[eio_sid]
+
+ def _engineio_server_class(self):
+ return engineio.Server
diff --git a/.venv/Lib/site-packages/socketio/simple_client.py b/.venv/Lib/site-packages/socketio/simple_client.py
new file mode 100644
index 0000000..6779147
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/simple_client.py
@@ -0,0 +1,191 @@
+from threading import Event
+from socketio import Client
+from socketio.exceptions import SocketIOError, TimeoutError, DisconnectedError
+
+
+class SimpleClient:
+ """A Socket.IO client.
+
+ This class implements a simple, yet fully compliant Socket.IO web client
+ with support for websocket and long-polling transports.
+
+ The positional and keyword arguments given in the constructor are passed
+ to the underlying :func:`socketio.Client` object.
+ """
+ def __init__(self, *args, **kwargs):
+ self.client_args = args
+ self.client_kwargs = kwargs
+ self.client = None
+ self.namespace = '/'
+ self.connected_event = Event()
+ self.connected = False
+ self.input_event = Event()
+ self.input_buffer = []
+
+ def connect(self, url, headers={}, auth=None, transports=None,
+ namespace='/', socketio_path='socket.io', wait_timeout=5):
+ """Connect to a Socket.IO server.
+
+ :param url: The URL of the Socket.IO server. It can include custom
+ query string parameters if required by the server. If a
+ function is provided, the client will invoke it to obtain
+ the URL each time a connection or reconnection is
+ attempted.
+ :param headers: A dictionary with custom headers to send with the
+ connection request. If a function is provided, the
+ client will invoke it to obtain the headers dictionary
+ each time a connection or reconnection is attempted.
+ :param auth: Authentication data passed to the server with the
+ connection request, normally a dictionary with one or
+ more string key/value pairs. If a function is provided,
+ the client will invoke it to obtain the authentication
+ data each time a connection or reconnection is attempted.
+ :param transports: The list of allowed transports. Valid transports
+ are ``'polling'`` and ``'websocket'``. If not
+ given, the polling transport is connected first,
+ then an upgrade to websocket is attempted.
+ :param namespace: The namespace to connect to as a string. If not
+ given, the default namespace ``/`` is used.
+ :param socketio_path: The endpoint where the Socket.IO server is
+ installed. The default value is appropriate for
+ most cases.
+ :param wait_timeout: How long the client should wait for the
+ connection to be established. The default is 5
+ seconds.
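+
+ A short sketch (the URL is a placeholder)::
+
+     client = socketio.SimpleClient()
+     client.connect('http://localhost:5000')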
+ """
+ if self.connected:
+ raise RuntimeError('Already connected')
+ self.namespace = namespace
+ self.input_buffer = []
+ self.input_event.clear()
+ self.client = Client(*self.client_args, **self.client_kwargs)
+
+ @self.client.event(namespace=self.namespace)
+ def connect(): # pragma: no cover
+ self.connected = True
+ self.connected_event.set()
+
+ @self.client.event(namespace=self.namespace)
+ def disconnect(): # pragma: no cover
+ self.connected_event.clear()
+
+ @self.client.event(namespace=self.namespace)
+ def __disconnect_final(): # pragma: no cover
+ self.connected = False
+ self.connected_event.set()
+
+ @self.client.on('*', namespace=self.namespace)
+ def on_event(event, *args): # pragma: no cover
+ self.input_buffer.append([event, *args])
+ self.input_event.set()
+
+ self.client.connect(url, headers=headers, auth=auth,
+ transports=transports, namespaces=[namespace],
+ socketio_path=socketio_path,
+ wait_timeout=wait_timeout)
+
+ @property
+ def sid(self):
+ """The session ID received from the server.
+
+ The session ID is not guaranteed to remain constant throughout the life
+ of the connection, as reconnections can cause it to change.
+ """
+ return self.client.get_sid(self.namespace) if self.client else None
+
+ @property
+ def transport(self):
+ """The name of the transport currently in use.
+
+ The transport is returned as a string and can be one of ``polling``
+ and ``websocket``.
+ """
+ return self.client.transport if self.client else ''
+
+ def emit(self, event, data=None):
+ """Emit an event to the server.
+
+ :param event: The event name. It can be any string. The event names
+ ``'connect'``, ``'message'`` and ``'disconnect'`` are
+ reserved and should not be used.
+ :param data: The data to send to the server. Data can be of
+ type ``str``, ``bytes``, ``list`` or ``dict``. To send
+ multiple arguments, use a tuple where each element is of
+ one of the types indicated above.
+
+ This method schedules the event to be sent out and returns, without
+ actually waiting for its delivery. In cases where the client needs to
+ ensure that the event was received, :func:`socketio.SimpleClient.call`
+ should be used instead.
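+
+ A short sketch (the event name and payload are illustrative)::
+
+     client.emit('update', {'value': 42})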
+ """
+ while True:
+ self.connected_event.wait()
+ if not self.connected:
+ raise DisconnectedError()
+ try:
+ return self.client.emit(event, data, namespace=self.namespace)
+ except SocketIOError:
+ pass
+
+ def call(self, event, data=None, timeout=60):
+ """Emit an event to the server and wait for a response.
+
+ This method issues an emit and waits for the server to provide a
+ response or acknowledgement. If the response does not arrive before the
+ timeout, then a ``TimeoutError`` exception is raised.
+
+ :param event: The event name. It can be any string. The event names
+ ``'connect'``, ``'message'`` and ``'disconnect'`` are
+ reserved and should not be used.
+ :param data: The data to send to the server. Data can be of
+ type ``str``, ``bytes``, ``list`` or ``dict``. To send
+ multiple arguments, use a tuple where each element is of
+ one of the types indicated above.
+ :param timeout: The waiting timeout. If the timeout is reached before
+ the server acknowledges the event, then a
+ ``TimeoutError`` exception is raised.
+ """
+ while True:
+ self.connected_event.wait()
+ if not self.connected:
+ raise DisconnectedError()
+ try:
+ return self.client.call(event, data, namespace=self.namespace,
+ timeout=timeout)
+ except SocketIOError:
+ pass
+
+ def receive(self, timeout=None):
+ """Wait for an event from the server.
+
+ :param timeout: The waiting timeout. If the timeout is reached before
+ the server acknowledges the event, then a
+ ``TimeoutError`` exception is raised.
+
+ The return value is a list with the event name as the first element. If
+ the server included arguments with the event, they are returned as
+ additional list elements.
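+
+ A short sketch (the timeout is illustrative)::
+
+     event = client.receive(timeout=5)
+     print('event name:', event[0], 'arguments:', event[1:])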
+ """
+ while not self.input_buffer:
+ if not self.connected_event.wait(
+ timeout=timeout): # pragma: no cover
+ raise TimeoutError()
+ if not self.connected:
+ raise DisconnectedError()
+ if not self.input_event.wait(timeout=timeout):
+ raise TimeoutError()
+ self.input_event.clear()
+ return self.input_buffer.pop(0)
+
+ def disconnect(self):
+ """Disconnect from the server."""
+ if self.connected:
+ self.client.disconnect()
+ self.client = None
+ self.connected = False
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.disconnect()
diff --git a/.venv/Lib/site-packages/socketio/tornado.py b/.venv/Lib/site-packages/socketio/tornado.py
new file mode 100644
index 0000000..160bd32
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/tornado.py
@@ -0,0 +1,9 @@
+try:
+ from engineio.async_drivers.tornado import get_tornado_handler as \
+ get_engineio_handler
+except ImportError: # pragma: no cover
+ get_engineio_handler = None
+
+
+def get_tornado_handler(socketio_server): # pragma: no cover
+ return get_engineio_handler(socketio_server.eio)
diff --git a/.venv/Lib/site-packages/socketio/zmq_manager.py b/.venv/Lib/site-packages/socketio/zmq_manager.py
new file mode 100644
index 0000000..760fbc3
--- /dev/null
+++ b/.venv/Lib/site-packages/socketio/zmq_manager.py
@@ -0,0 +1,105 @@
+import pickle
+import re
+
+from .pubsub_manager import PubSubManager
+
+
+class ZmqManager(PubSubManager): # pragma: no cover
+ """zmq based client manager.
+
+ NOTE: this zmq implementation should be considered experimental. At this
+ time, eventlet is required to use zmq.
+
+ This class implements a zmq backend for event sharing across multiple
+ processes. To use a zmq backend, initialize the :class:`Server` instance as
+ follows::
+
+ url = 'zmq+tcp://hostname:port1+port2'
+ server = socketio.Server(client_manager=socketio.ZmqManager(url))
+
+ :param url: The connection URL for the zmq message broker,
+ which will need to be provided and running.
+ :param channel: The channel name on which the server sends and receives
+ notifications. Must be the same in all the servers.
+ :param write_only: If set to ``True``, only initialize to emit events. The
+ default of ``False`` initializes the class for emitting
+ and receiving.
+
+ A zmq message broker must be running for the zmq_manager to work. You
+ can write your own or adapt the simple broker below::
+
+ import zmq
+
+ receiver = zmq.Context().socket(zmq.PULL)
+ receiver.bind("tcp://*:5555")
+
+ publisher = zmq.Context().socket(zmq.PUB)
+ publisher.bind("tcp://*:5556")
+
+ while True:
+ publisher.send(receiver.recv())
+ """
+ name = 'zmq'
+
+ def __init__(self, url='zmq+tcp://localhost:5555+5556',
+ channel='socketio',
+ write_only=False,
+ logger=None):
+ try:
+ from eventlet.green import zmq
+ except ImportError:
+ raise RuntimeError('zmq package is not installed '
+ '(Run "pip install pyzmq" in your '
+ 'virtualenv).')
+
+ r = re.compile(r':\d+\+\d+$')
+ if not (url.startswith('zmq+tcp://') and r.search(url)):
+ raise RuntimeError('unexpected connection string: ' + url)
+
+ url = url.replace('zmq+', '')
+ (sink_url, sub_port) = url.split('+')
+ sink_port = sink_url.split(':')[-1]
+ sub_url = sink_url.replace(sink_port, sub_port)
+
+ sink = zmq.Context().socket(zmq.PUSH)
+ sink.connect(sink_url)
+
+ sub = zmq.Context().socket(zmq.SUB)
+ sub.setsockopt_string(zmq.SUBSCRIBE, u'')
+ sub.connect(sub_url)
+
+ self.sink = sink
+ self.sub = sub
+ self.channel = channel
+ super().__init__(channel=channel, write_only=write_only, logger=logger)
+
+ def _publish(self, data):
+ pickled_data = pickle.dumps(
+ {
+ 'type': 'message',
+ 'channel': self.channel,
+ 'data': data
+ }
+ )
+ return self.sink.send(pickled_data)
+
+ def zmq_listen(self):
+ while True:
+ response = self.sub.recv()
+ if response is not None:
+ yield response
+
+ def _listen(self):
+ for message in self.zmq_listen():
+ if isinstance(message, bytes):
+ try:
+ message = pickle.loads(message)
+ except Exception:
+ pass
+ if isinstance(message, dict) and \
+ message['type'] == 'message' and \
+ message['channel'] == self.channel and \
+ 'data' in message:
+ yield message['data']
+ return
diff --git a/.venv/Lib/site-packages/wsproto-1.2.0.dist-info/INSTALLER b/.venv/Lib/site-packages/wsproto-1.2.0.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/.venv/Lib/site-packages/wsproto-1.2.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/.venv/Lib/site-packages/wsproto-1.2.0.dist-info/LICENSE b/.venv/Lib/site-packages/wsproto-1.2.0.dist-info/LICENSE
new file mode 100644
index 0000000..cc10784
--- /dev/null
+++ b/.venv/Lib/site-packages/wsproto-1.2.0.dist-info/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 Benno Rice and contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/.venv/Lib/site-packages/wsproto-1.2.0.dist-info/METADATA b/.venv/Lib/site-packages/wsproto-1.2.0.dist-info/METADATA
new file mode 100644
index 0000000..32e8bdb
--- /dev/null
+++ b/.venv/Lib/site-packages/wsproto-1.2.0.dist-info/METADATA
@@ -0,0 +1,177 @@
+Metadata-Version: 2.1
+Name: wsproto
+Version: 1.2.0
+Summary: WebSockets state-machine based protocol implementation
+Home-page: https://github.com/python-hyper/wsproto/
+Author: Benno Rice
+Author-email: benno@jeamland.net
+License: MIT License
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=3.7.0
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Requires-Dist: h11 (<1,>=0.9.0)
+
+========================================================
+Pure Python, pure state-machine WebSocket implementation
+========================================================
+
+.. image:: https://github.com/python-hyper/wsproto/workflows/CI/badge.svg
+ :target: https://github.com/python-hyper/wsproto/actions
+ :alt: Build Status
+.. image:: https://codecov.io/gh/python-hyper/wsproto/branch/main/graph/badge.svg
+ :target: https://codecov.io/gh/python-hyper/wsproto
+ :alt: Code Coverage
+.. image:: https://readthedocs.org/projects/wsproto/badge/?version=latest
+ :target: https://wsproto.readthedocs.io/en/latest/
+ :alt: Documentation Status
+.. image:: https://img.shields.io/badge/chat-join_now-brightgreen.svg
+ :target: https://gitter.im/python-hyper/community
+ :alt: Chat community
+
+
+This repository contains a pure-Python implementation of a WebSocket protocol
+stack. It's written from the ground up to be embeddable in whatever program you
+choose to use, ensuring that you can communicate via WebSockets, as defined in
+`RFC6455 <https://tools.ietf.org/html/rfc6455>`_, regardless of your programming
+paradigm.
+
+This repository does not provide a parsing layer, a network layer, or any rules
+about concurrency. Instead, it's a purely in-memory solution, defined in terms
+of data actions and WebSocket frames. RFC6455 and Compression Extensions for
+WebSocket via `RFC7692 <https://tools.ietf.org/html/rfc7692>`_ are fully
+supported.
+
+wsproto supports Python 3.7.0 or higher.
+
+To install it, just run:
+
+.. code-block:: console
+
+ $ pip install wsproto
+
+
+Usage
+=====
+
+Let's assume you have some form of network socket available. wsproto client
+connections automatically generate an HTTP request to initiate the WebSocket
+handshake. To create a WebSocket client connection:
+
+.. code-block:: python
+
+ from wsproto import WSConnection, ConnectionType
+ from wsproto.events import Request
+
+ ws = WSConnection(ConnectionType.CLIENT)
+ ws.send(Request(host='echo.websocket.org', target='/'))
+
+To create a WebSocket server connection:
+
+.. code-block:: python
+
+ from wsproto.connection import WSConnection, ConnectionType
+
+ ws = WSConnection(ConnectionType.SERVER)
+
+Every time you send a message, or call a ping, or simply if you receive incoming
+data, wsproto might respond with some outgoing data that you have to send:
+
+.. code-block:: python
+
+ some_socket.send(ws.bytes_to_send())
+
+Both connection types need to receive incoming data:
+
+.. code-block:: python
+
+ ws.receive_data(some_byte_string_of_data)
+
+And wsproto will issue events if the data contains any WebSocket messages or state changes:
+
+.. code-block:: python
+
+ for event in ws.events():
+ if isinstance(event, Request):
+ # only server connections get this event
+ ws.send(AcceptConnection())
+ elif isinstance(event, CloseConnection):
+ # guess nobody wants to talk to us any more...
+ some_socket.send(ws.send(event.response()))
+ elif isinstance(event, TextMessage):
+ print('We got text!', event.data)
+ elif isinstance(event, BytesMessage):
+ print('We got bytes!', event.data)
+
+Take a look at our docs for a `full list of events
+<https://wsproto.readthedocs.io/en/latest/api.html#events>`_!
+
+Testing
+=======
+
+It passes the autobahn test suite completely and strictly in both client and
+server modes and using permessage-deflate.
+
+If you want to run the compliance tests, go into the compliance directory and
+then to test client mode, in one shell run the Autobahn test server:
+
+.. code-block:: console
+
+ $ wstest -m fuzzingserver -s ws-fuzzingserver.json
+
+And in another shell run the test client:
+
+.. code-block:: console
+
+ $ python test_client.py
+
+And to test server mode, run the test server:
+
+.. code-block:: console
+
+ $ python test_server.py
+
+And in another shell run the Autobahn test client:
+
+.. code-block:: console
+
+ $ wstest -m fuzzingclient -s ws-fuzzingclient.json
+
+
+Documentation
+=============
+
+Documentation is available at https://wsproto.readthedocs.io/en/latest/.
+
+Contributing
+============
+
+``wsproto`` welcomes contributions from anyone! Unlike many other projects we
+are happy to accept cosmetic contributions and small contributions, in addition
+to large feature requests and changes.
+
+Before you contribute (either by opening an issue or filing a pull request),
+please `read the contribution guidelines`_.
+
+.. _read the contribution guidelines: http://python-hyper.org/en/latest/contributing.html
+
+License
+=======
+
+``wsproto`` is made available under the MIT License. For more details, see the
+``LICENSE`` file in the repository.
+
+Authors
+=======
+
+``wsproto`` was created by @jeamland, and is maintained by the python-hyper
+community.
diff --git a/.venv/Lib/site-packages/wsproto-1.2.0.dist-info/RECORD b/.venv/Lib/site-packages/wsproto-1.2.0.dist-info/RECORD
new file mode 100644
index 0000000..2d1b20c
--- /dev/null
+++ b/.venv/Lib/site-packages/wsproto-1.2.0.dist-info/RECORD
@@ -0,0 +1,23 @@
+wsproto-1.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+wsproto-1.2.0.dist-info/LICENSE,sha256=wDKajb80N7CV9_XPQlfWu4VeBxIMroeGWGBz_3ppmVk,1093
+wsproto-1.2.0.dist-info/METADATA,sha256=V7EI9a-gXS3NLxeYYmWqtf_MkMfepGoTF03IaR-OVwo,5607
+wsproto-1.2.0.dist-info/RECORD,,
+wsproto-1.2.0.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
+wsproto-1.2.0.dist-info/top_level.txt,sha256=BUdIrwL11zET0fkWkYRJ1yZKrEfvDF9DZqjhABOio6Y,8
+wsproto/__init__.py,sha256=zQSIjLjveTHwyhGAfqG_n_cVl54hTkeV6vuad1cnEOE,2887
+wsproto/__pycache__/__init__.cpython-312.pyc,,
+wsproto/__pycache__/connection.cpython-312.pyc,,
+wsproto/__pycache__/events.cpython-312.pyc,,
+wsproto/__pycache__/extensions.cpython-312.pyc,,
+wsproto/__pycache__/frame_protocol.cpython-312.pyc,,
+wsproto/__pycache__/handshake.cpython-312.pyc,,
+wsproto/__pycache__/typing.cpython-312.pyc,,
+wsproto/__pycache__/utilities.cpython-312.pyc,,
+wsproto/connection.py,sha256=LhsbokxZUmAtMsOFFZ45puZDPyIXNEmq7SanE7swAgE,6813
+wsproto/events.py,sha256=DW7YQ823oK3MjXHqcPvjJzjBGk5UGuMO_rpNnKgmmW8,7979
+wsproto/extensions.py,sha256=VlnojvsC2AO7vbUkw_TOqCgtmHb1dSplXeRwjMfjCo4,11211
+wsproto/frame_protocol.py,sha256=B5p_wRq54gvTihegbJ39RkONrdhtipoYDEiYq95BdAk,23401
+wsproto/handshake.py,sha256=hPqTo15MqOxYlvcNYTo-bIzQJHtRLq8qq2jBdHdz2x8,18036
+wsproto/py.typed,sha256=sow9soTwP9T_gEAQSVh7Gb8855h04Nwmhs2We-JRgZM,7
+wsproto/typing.py,sha256=Ryf6eOhAzfZlHCFELiiayDzNqdXQG7JKccblOqNx6Wo,68
+wsproto/utilities.py,sha256=5qmPXSUhUp2GESgvgIacZ7N4uqd0vBijhVV7t6XTiZw,2816
diff --git a/.venv/Lib/site-packages/wsproto-1.2.0.dist-info/WHEEL b/.venv/Lib/site-packages/wsproto-1.2.0.dist-info/WHEEL
new file mode 100644
index 0000000..5bad85f
--- /dev/null
+++ b/.venv/Lib/site-packages/wsproto-1.2.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/.venv/Lib/site-packages/wsproto-1.2.0.dist-info/top_level.txt b/.venv/Lib/site-packages/wsproto-1.2.0.dist-info/top_level.txt
new file mode 100644
index 0000000..8b7d144
--- /dev/null
+++ b/.venv/Lib/site-packages/wsproto-1.2.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+wsproto
diff --git a/.venv/Lib/site-packages/wsproto/__init__.py b/.venv/Lib/site-packages/wsproto/__init__.py
new file mode 100644
index 0000000..46fde3f
--- /dev/null
+++ b/.venv/Lib/site-packages/wsproto/__init__.py
@@ -0,0 +1,94 @@
+"""
+wsproto
+~~~~~~~
+
+A WebSocket implementation.
+"""
+from typing import Generator, Optional, Union
+
+from .connection import Connection, ConnectionState, ConnectionType
+from .events import Event
+from .handshake import H11Handshake
+from .typing import Headers
+
+__version__ = "1.2.0"
+
+
+class WSConnection:
+ """
+ Represents the local end of a WebSocket connection to a remote peer.
+ """
+
+ def __init__(self, connection_type: ConnectionType) -> None:
+ """
+ Constructor
+
+ :param wsproto.connection.ConnectionType connection_type: Controls
+ whether the library behaves as a client or as a server.
+ """
+ self.client = connection_type is ConnectionType.CLIENT
+ self.handshake = H11Handshake(connection_type)
+ self.connection: Optional[Connection] = None
+
+ @property
+ def state(self) -> ConnectionState:
+ """
+ :returns: Connection state
+ :rtype: wsproto.connection.ConnectionState
+ """
+ if self.connection is None:
+ return self.handshake.state
+ return self.connection.state
+
+ def initiate_upgrade_connection(
+ self, headers: Headers, path: Union[bytes, str]
+ ) -> None:
+ self.handshake.initiate_upgrade_connection(headers, path)
+
+ def send(self, event: Event) -> bytes:
+ """
+ Generate network data for the specified event.
+
+ When you want to communicate with a WebSocket peer, you should construct
+ an event and pass it to this method. This method will return the bytes
+ that you should send to the peer.
+
+ :param wsproto.events.Event event: The event to generate data for
+ :returns bytes: The data to send to the peer
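+
+ A short sketch, assuming the handshake has completed and ``sock`` is a
+ connected socket::
+
+     from wsproto.events import Ping
+
+     sock.send(ws.send(Ping()))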
+ """
+ data = b""
+ if self.connection is None:
+ data += self.handshake.send(event)
+ self.connection = self.handshake.connection
+ else:
+ data += self.connection.send(event)
+ return data
+
+ def receive_data(self, data: Optional[bytes]) -> None:
+ """
+ Feed network data into the connection instance.
+
+ After calling this method, you should call :meth:`events` to see if the
+ received data triggered any new events.
+
+ :param bytes data: Data received from remote peer
+ """
+ if self.connection is None:
+ self.handshake.receive_data(data)
+ self.connection = self.handshake.connection
+ else:
+ self.connection.receive_data(data)
+
+ def events(self) -> Generator[Event, None, None]:
+ """
+ A generator that yields pending events.
+
+ Each event is an instance of a subclass of
+ :class:`wsproto.events.Event`.
+ """
+ yield from self.handshake.events()
+ if self.connection is not None:
+ yield from self.connection.events()
+
+
+__all__ = ("ConnectionType", "WSConnection")
diff --git a/.venv/Lib/site-packages/wsproto/__pycache__/__init__.cpython-312.pyc b/.venv/Lib/site-packages/wsproto/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000..63f96c4
Binary files /dev/null and b/.venv/Lib/site-packages/wsproto/__pycache__/__init__.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/wsproto/__pycache__/connection.cpython-312.pyc b/.venv/Lib/site-packages/wsproto/__pycache__/connection.cpython-312.pyc
new file mode 100644
index 0000000..038d4f4
Binary files /dev/null and b/.venv/Lib/site-packages/wsproto/__pycache__/connection.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/wsproto/__pycache__/events.cpython-312.pyc b/.venv/Lib/site-packages/wsproto/__pycache__/events.cpython-312.pyc
new file mode 100644
index 0000000..0c26754
Binary files /dev/null and b/.venv/Lib/site-packages/wsproto/__pycache__/events.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/wsproto/__pycache__/extensions.cpython-312.pyc b/.venv/Lib/site-packages/wsproto/__pycache__/extensions.cpython-312.pyc
new file mode 100644
index 0000000..0d49f8c
Binary files /dev/null and b/.venv/Lib/site-packages/wsproto/__pycache__/extensions.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/wsproto/__pycache__/frame_protocol.cpython-312.pyc b/.venv/Lib/site-packages/wsproto/__pycache__/frame_protocol.cpython-312.pyc
new file mode 100644
index 0000000..7562a65
Binary files /dev/null and b/.venv/Lib/site-packages/wsproto/__pycache__/frame_protocol.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/wsproto/__pycache__/handshake.cpython-312.pyc b/.venv/Lib/site-packages/wsproto/__pycache__/handshake.cpython-312.pyc
new file mode 100644
index 0000000..0d8d910
Binary files /dev/null and b/.venv/Lib/site-packages/wsproto/__pycache__/handshake.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/wsproto/__pycache__/typing.cpython-312.pyc b/.venv/Lib/site-packages/wsproto/__pycache__/typing.cpython-312.pyc
new file mode 100644
index 0000000..45626d7
Binary files /dev/null and b/.venv/Lib/site-packages/wsproto/__pycache__/typing.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/wsproto/__pycache__/utilities.cpython-312.pyc b/.venv/Lib/site-packages/wsproto/__pycache__/utilities.cpython-312.pyc
new file mode 100644
index 0000000..35465b7
Binary files /dev/null and b/.venv/Lib/site-packages/wsproto/__pycache__/utilities.cpython-312.pyc differ
diff --git a/.venv/Lib/site-packages/wsproto/connection.py b/.venv/Lib/site-packages/wsproto/connection.py
new file mode 100644
index 0000000..4439165
--- /dev/null
+++ b/.venv/Lib/site-packages/wsproto/connection.py
@@ -0,0 +1,189 @@
+"""
+wsproto/connection
+~~~~~~~~~~~~~~~~~~
+
+An implementation of a WebSocket connection.
+"""
+
+from collections import deque
+from enum import Enum
+from typing import Deque, Generator, List, Optional
+
+from .events import (
+ BytesMessage,
+ CloseConnection,
+ Event,
+ Message,
+ Ping,
+ Pong,
+ TextMessage,
+)
+from .extensions import Extension
+from .frame_protocol import CloseReason, FrameProtocol, Opcode, ParseFailed
+from .utilities import LocalProtocolError
+
+
+class ConnectionState(Enum):
+ """
+ RFC 6455, Section 4 - Opening Handshake
+ """
+
+ #: The opening handshake is in progress.
+ CONNECTING = 0
+ #: The opening handshake is complete.
+ OPEN = 1
+ #: The remote WebSocket has initiated a connection close.
+ REMOTE_CLOSING = 2
+ #: The local WebSocket (i.e. this instance) has initiated a connection close.
+ LOCAL_CLOSING = 3
+ #: The closing handshake has completed.
+ CLOSED = 4
+ #: The connection was rejected during the opening handshake.
+ REJECTING = 5
+
+
+class ConnectionType(Enum):
+ """An enumeration of connection types."""
+
+ #: This connection will act as client and talk to a remote server
+ CLIENT = 1
+
+ #: This connection will act as a server and wait for client connections
+ SERVER = 2
+
+
+CLIENT = ConnectionType.CLIENT
+SERVER = ConnectionType.SERVER
+
+
+class Connection:
+ """
+ A low-level WebSocket connection object.
+
+ This wraps two other protocol objects, an HTTP/1.1 protocol object used
+ to do the initial HTTP upgrade handshake and a WebSocket frame protocol
+ object used to exchange messages and other control frames.
+
+ :param conn_type: Whether this object is on the client- or server-side of
+ a connection. To initialise as a client pass ``CLIENT`` otherwise
+ pass ``SERVER``.
+ :type conn_type: ``ConnectionType``
+ """
+
+ def __init__(
+ self,
+ connection_type: ConnectionType,
+ extensions: Optional[List[Extension]] = None,
+ trailing_data: bytes = b"",
+ ) -> None:
+ self.client = connection_type is ConnectionType.CLIENT
+ self._events: Deque[Event] = deque()
+ self._proto = FrameProtocol(self.client, extensions or [])
+ self._state = ConnectionState.OPEN
+ self.receive_data(trailing_data)
+
+ @property
+ def state(self) -> ConnectionState:
+ return self._state
+
+ def send(self, event: Event) -> bytes:
+ data = b""
+ if isinstance(event, Message) and self.state == ConnectionState.OPEN:
+ data += self._proto.send_data(event.data, event.message_finished)
+ elif isinstance(event, Ping) and self.state == ConnectionState.OPEN:
+ data += self._proto.ping(event.payload)
+ elif isinstance(event, Pong) and self.state == ConnectionState.OPEN:
+ data += self._proto.pong(event.payload)
+ elif isinstance(event, CloseConnection) and self.state in {
+ ConnectionState.OPEN,
+ ConnectionState.REMOTE_CLOSING,
+ }:
+ data += self._proto.close(event.code, event.reason)
+ if self.state == ConnectionState.REMOTE_CLOSING:
+ self._state = ConnectionState.CLOSED
+ else:
+ self._state = ConnectionState.LOCAL_CLOSING
+ else:
+ raise LocalProtocolError(
+ f"Event {event} cannot be sent in state {self.state}."
+ )
+ return data
+
+ def receive_data(self, data: Optional[bytes]) -> None:
+ """
+ Pass some received data to the connection for handling.
+
+ A list of events that the remote peer triggered by sending this data can
+ be retrieved with :meth:`~wsproto.connection.Connection.events`.
+
+ :param data: The data received from the remote peer on the network.
+ :type data: ``bytes``
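+
+ A short sketch, assuming ``sock`` is a connected socket; an empty read
+ signals that the peer closed the underlying connection::
+
+     data = sock.recv(4096)
+     ws.receive_data(data or None)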
+ """
+
+ if data is None:
+ # "If _The WebSocket Connection is Closed_ and no Close control
+ # frame was received by the endpoint (such as could occur if the
+ # underlying transport connection is lost), _The WebSocket
+ # Connection Close Code_ is considered to be 1006."
+ self._events.append(CloseConnection(code=CloseReason.ABNORMAL_CLOSURE))
+ self._state = ConnectionState.CLOSED
+ return
+
+ if self.state in (ConnectionState.OPEN, ConnectionState.LOCAL_CLOSING):
+ self._proto.receive_bytes(data)
+ elif self.state is ConnectionState.CLOSED:
+ raise LocalProtocolError("Connection already closed.")
+ else:
+ pass # pragma: no cover
+
+ def events(self) -> Generator[Event, None, None]:
+ """
+ Return a generator that provides any events that have been generated
+ by protocol activity.
+
+ :returns: generator of :class:`Event ` subclasses
+ """
+ while self._events:
+ yield self._events.popleft()
+
+ try:
+ for frame in self._proto.received_frames():
+ if frame.opcode is Opcode.PING:
+ assert frame.frame_finished and frame.message_finished
+ assert isinstance(frame.payload, (bytes, bytearray))
+ yield Ping(payload=frame.payload)
+
+ elif frame.opcode is Opcode.PONG:
+ assert frame.frame_finished and frame.message_finished
+ assert isinstance(frame.payload, (bytes, bytearray))
+ yield Pong(payload=frame.payload)
+
+ elif frame.opcode is Opcode.CLOSE:
+ assert isinstance(frame.payload, tuple)
+ code, reason = frame.payload
+ if self.state is ConnectionState.LOCAL_CLOSING:
+ self._state = ConnectionState.CLOSED
+ else:
+ self._state = ConnectionState.REMOTE_CLOSING
+ yield CloseConnection(code=code, reason=reason)
+
+ elif frame.opcode is Opcode.TEXT:
+ assert isinstance(frame.payload, str)
+ yield TextMessage(
+ data=frame.payload,
+ frame_finished=frame.frame_finished,
+ message_finished=frame.message_finished,
+ )
+
+ elif frame.opcode is Opcode.BINARY:
+ assert isinstance(frame.payload, (bytes, bytearray))
+ yield BytesMessage(
+ data=frame.payload,
+ frame_finished=frame.frame_finished,
+ message_finished=frame.message_finished,
+ )
+
+ else:
+ pass # pragma: no cover
+ except ParseFailed as exc:
+ yield CloseConnection(code=exc.code, reason=str(exc))
diff --git a/.venv/Lib/site-packages/wsproto/events.py b/.venv/Lib/site-packages/wsproto/events.py
new file mode 100644
index 0000000..d758f8a
--- /dev/null
+++ b/.venv/Lib/site-packages/wsproto/events.py
@@ -0,0 +1,295 @@
+"""
+wsproto/events
+~~~~~~~~~~~~~~
+
+Events that result from processing data on a WebSocket connection.
+"""
+from abc import ABC
+from dataclasses import dataclass, field
+from typing import Generic, List, Optional, Sequence, TypeVar, Union
+
+from .extensions import Extension
+from .typing import Headers
+
+
+class Event(ABC):
+ """
+ Base class for wsproto events.
+ """
+
+ pass # noqa
+
+
+@dataclass(frozen=True)
+class Request(Event):
+ """The beginning of a Websocket connection, the HTTP Upgrade request
+
+ This event is fired when a SERVER connection receives a WebSocket
+ handshake request (HTTP with upgrade header).
+
+ Fields:
+
+ .. attribute:: host
+
+ (Required) The hostname, or host header value.
+
+ .. attribute:: target
+
+ (Required) The request target (path and query string)
+
+ .. attribute:: extensions
+
+ The proposed extensions.
+
+ .. attribute:: extra_headers
+
+ The additional request headers, excluding extensions, host, subprotocols,
+ and version headers.
+
+ .. attribute:: subprotocols
+
+ A list of the subprotocols proposed in the request, as a list
+ of strings.
+ """
+
+ host: str
+ target: str
+ extensions: Union[Sequence[Extension], Sequence[str]] = field( # type: ignore[assignment]
+ default_factory=list
+ )
+ extra_headers: Headers = field(default_factory=list)
+ subprotocols: List[str] = field(default_factory=list)
+
+
+@dataclass(frozen=True)
+class AcceptConnection(Event):
+ """The acceptance of a Websocket upgrade request.
+
+ This event is fired when a CLIENT receives an acceptance response
+ from a server. It is also used to accept an upgrade request when
+ acting as a SERVER.
+
+ Fields:
+
+ .. attribute:: extra_headers
+
+ Any additional (non websocket related) headers present in the
+ acceptance response.
+
+ .. attribute:: subprotocol
+
+ The accepted subprotocol to use.
+
+ """
+
+ subprotocol: Optional[str] = None
+ extensions: List[Extension] = field(default_factory=list)
+ extra_headers: Headers = field(default_factory=list)
+
+
+@dataclass(frozen=True)
+class RejectConnection(Event):
+ """The rejection of a Websocket upgrade request, the HTTP response.
+
+ The ``RejectConnection`` event sends the appropriate HTTP headers to
+ communicate to the peer that the handshake has been rejected. You may also
+ send an HTTP body by setting the ``has_body`` attribute to ``True`` and then
+ sending one or more :class:`RejectData` events after this one. When sending
+ a response body, the caller should set the ``Content-Length``,
+ ``Content-Type``, and/or ``Transfer-Encoding`` headers as appropriate.
+
+    When receiving a ``RejectConnection`` event, the ``has_body`` attribute will
+    in almost all cases be ``True`` (even if the server set it to ``False``) and
+    will be followed by at least one ``RejectData`` event, even though the data
+    itself might be just ``b""``. (The only scenario in which the caller
+    receives a ``RejectConnection`` with ``has_body == False`` is if the peer
+    violates the protocol by sending an informational status code (1xx) other
+    than 101.)
+
+    When sending, a ``has_body`` of ``False`` causes a ``Content-Length: 0``
+    header to be added automatically; if it is ``True``, the headers should
+    include a content-length or transfer encoding as described above.
+
+ Fields:
+
+ .. attribute:: headers (Headers)
+
+ The headers to send with the response.
+
+ .. attribute:: has_body
+
+ This defaults to False, but set to True if there is a body. See
+ also :class:`~RejectData`.
+
+ .. attribute:: status_code
+
+ The response status code.
+
+ """
+
+ status_code: int = 400
+ headers: Headers = field(default_factory=list)
+ has_body: bool = False
+
+
+@dataclass(frozen=True)
+class RejectData(Event):
+ """The rejection HTTP response body.
+
+ The caller may send multiple ``RejectData`` events. The final event should
+ have the ``body_finished`` attribute set to ``True``.
+
+ Fields:
+
+ .. attribute:: body_finished
+
+ True if this is the final chunk of the body data.
+
+ .. attribute:: data (bytes)
+
+ (Required) The raw body data.
+
+ """
+
+ data: bytes
+ body_finished: bool = True
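+
+# Illustrative sketch, not part of wsproto: rejecting a handshake with a short
+# HTTP body. ``server`` is assumed to be an H11Handshake in the SERVER role
+# that has already received a Request; the caller transmits the result.
+def _example_reject(server) -> bytes:
+    data = server.send(RejectConnection(
+        status_code=403,
+        headers=[(b"Content-Type", b"text/plain"), (b"Content-Length", b"9")],
+        has_body=True,
+    ))
+    data += server.send(RejectData(data=b"forbidden"))  # body_finished defaults to True
+    return data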
+
+
+@dataclass(frozen=True)
+class CloseConnection(Event):
+
+ """The end of a Websocket connection, represents a closure frame.
+
+ **wsproto does not automatically send a response to a close event.** To
+ comply with the RFC you MUST send a close event back to the remote WebSocket
+ if you have not already sent one. The :meth:`response` method provides a
+ suitable event for this purpose, and you should check if a response needs
+ to be sent by checking :func:`wsproto.WSConnection.state`.
+
+ Fields:
+
+ .. attribute:: code
+
+ (Required) The integer close code to indicate why the connection
+ has closed.
+
+ .. attribute:: reason
+
+ Additional reasoning for why the connection has closed.
+
+ """
+
+ code: int
+ reason: Optional[str] = None
+
+ def response(self) -> "CloseConnection":
+ """Generate an RFC-compliant close frame to send back to the peer."""
+ return CloseConnection(code=self.code, reason=self.reason)
+
+
+T = TypeVar("T", bytes, str)
+
+
+@dataclass(frozen=True)
+class Message(Event, Generic[T]):
+ """The websocket data message.
+
+ Fields:
+
+ .. attribute:: data
+
+        (Required) The message data as a byte string; it can be decoded as
+        UTF-8 for TEXT messages. This represents only a single chunk of data
+        and not a full WebSocket message. You need to buffer and reassemble
+        these chunks to get the full message.
+
+ .. attribute:: frame_finished
+
+ This has no semantic content, but is provided just in case some
+ weird edge case user wants to be able to reconstruct the
+ fragmentation pattern of the original stream.
+
+ .. attribute:: message_finished
+
+ True if this frame is the last one of this message, False if
+ more frames are expected.
+
+ """
+
+ data: T
+ frame_finished: bool = True
+ message_finished: bool = True
+
+
+@dataclass(frozen=True)
+class TextMessage(Message[str]): # pylint: disable=unsubscriptable-object
+ """This event is fired when a data frame with TEXT payload is received.
+
+ Fields:
+
+ .. attribute:: data
+
+        The message data as a string. This represents only a single chunk of
+        data and not a full WebSocket message. You need to buffer and
+        reassemble these chunks to get the full message.
+
+ """
+
+ # https://github.com/python/mypy/issues/5744
+ data: str
+
+
+@dataclass(frozen=True)
+class BytesMessage(Message[bytes]): # pylint: disable=unsubscriptable-object
+ """This event is fired when a data frame with BINARY payload is
+ received.
+
+ Fields:
+
+ .. attribute:: data
+
+        The message data as a byte string. This represents only a single
+        chunk of data and not a full WebSocket message. You need to buffer
+        and reassemble these chunks to get the full message.
+ """
+
+ # https://github.com/python/mypy/issues/5744
+ data: bytes
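+
+# Illustrative sketch, not part of wsproto: reassembling a fragmented message
+# from TextMessage chunks, as the docstrings above recommend. ``events`` is
+# assumed to be any iterable of wsproto events.
+def _example_reassemble_text(events) -> str:
+    chunks = []
+    for event in events:
+        if isinstance(event, TextMessage):
+            chunks.append(event.data)
+            if event.message_finished:
+                return "".join(chunks)
+    raise ValueError("event stream ended in the middle of a message")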
+
+
+@dataclass(frozen=True)
+class Ping(Event):
+ """The Ping event can be sent to trigger a ping frame and is fired
+ when a Ping is received.
+
+ **wsproto does not automatically send a pong response to a ping event.** To
+ comply with the RFC you MUST send a pong even as soon as is practical. The
+ :meth:`response` method provides a suitable event for this purpose.
+
+ Fields:
+
+ .. attribute:: payload
+
+ An optional payload to emit with the ping frame.
+ """
+
+ payload: bytes = b""
+
+ def response(self) -> "Pong":
+ """Generate an RFC-compliant :class:`Pong` response to this ping."""
+ return Pong(payload=self.payload)
+
+
+@dataclass(frozen=True)
+class Pong(Event):
+ """The Pong event is fired when a Pong is received.
+
+ Fields:
+
+ .. attribute:: payload
+
+ An optional payload to emit with the pong frame.
+
+ """
+
+ payload: bytes = b""
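+
+
+# Illustrative sketch, not part of wsproto: a keepalive helper. ``conn`` is a
+# wsproto Connection; the caller is responsible for transmitting the returned
+# bytes and for watching for the matching Pong in conn.events().
+def _example_keepalive(conn) -> bytes:
+    return conn.send(Ping(payload=b"keepalive"))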
diff --git a/.venv/Lib/site-packages/wsproto/extensions.py b/.venv/Lib/site-packages/wsproto/extensions.py
new file mode 100644
index 0000000..ea8555d
--- /dev/null
+++ b/.venv/Lib/site-packages/wsproto/extensions.py
@@ -0,0 +1,315 @@
+"""
+wsproto/extensions
+~~~~~~~~~~~~~~~~~~
+
+WebSocket extensions.
+"""
+
+import zlib
+from typing import Optional, Tuple, Union
+
+from .frame_protocol import CloseReason, FrameDecoder, FrameProtocol, Opcode, RsvBits
+
+
+class Extension:
+ name: str
+
+ def enabled(self) -> bool:
+ return False
+
+ def offer(self) -> Union[bool, str]:
+ pass
+
+ def accept(self, offer: str) -> Optional[Union[bool, str]]:
+ pass
+
+ def finalize(self, offer: str) -> None:
+ pass
+
+ def frame_inbound_header(
+ self,
+ proto: Union[FrameDecoder, FrameProtocol],
+ opcode: Opcode,
+ rsv: RsvBits,
+ payload_length: int,
+ ) -> Union[CloseReason, RsvBits]:
+ return RsvBits(False, False, False)
+
+ def frame_inbound_payload_data(
+ self, proto: Union[FrameDecoder, FrameProtocol], data: bytes
+ ) -> Union[bytes, CloseReason]:
+ return data
+
+ def frame_inbound_complete(
+ self, proto: Union[FrameDecoder, FrameProtocol], fin: bool
+ ) -> Union[bytes, CloseReason, None]:
+ pass
+
+ def frame_outbound(
+ self,
+ proto: Union[FrameDecoder, FrameProtocol],
+ opcode: Opcode,
+ rsv: RsvBits,
+ data: bytes,
+ fin: bool,
+ ) -> Tuple[RsvBits, bytes]:
+ return (rsv, data)
+
+
+class PerMessageDeflate(Extension):
+ name = "permessage-deflate"
+
+ DEFAULT_CLIENT_MAX_WINDOW_BITS = 15
+ DEFAULT_SERVER_MAX_WINDOW_BITS = 15
+
+ def __init__(
+ self,
+ client_no_context_takeover: bool = False,
+ client_max_window_bits: Optional[int] = None,
+ server_no_context_takeover: bool = False,
+ server_max_window_bits: Optional[int] = None,
+ ) -> None:
+ self.client_no_context_takeover = client_no_context_takeover
+ self.server_no_context_takeover = server_no_context_takeover
+ self._client_max_window_bits = self.DEFAULT_CLIENT_MAX_WINDOW_BITS
+ self._server_max_window_bits = self.DEFAULT_SERVER_MAX_WINDOW_BITS
+ if client_max_window_bits is not None:
+ self.client_max_window_bits = client_max_window_bits
+ if server_max_window_bits is not None:
+ self.server_max_window_bits = server_max_window_bits
+
+ self._compressor: Optional[zlib._Compress] = None # noqa
+ self._decompressor: Optional[zlib._Decompress] = None # noqa
+ # This refers to the current frame
+ self._inbound_is_compressible: Optional[bool] = None
+ # This refers to the ongoing message (which might span multiple
+ # frames). Only the first frame in a fragmented message is flagged for
+ # compression, so this carries that bit forward.
+ self._inbound_compressed: Optional[bool] = None
+
+ self._enabled = False
+
+ @property
+ def client_max_window_bits(self) -> int:
+ return self._client_max_window_bits
+
+ @client_max_window_bits.setter
+ def client_max_window_bits(self, value: int) -> None:
+ if value < 9 or value > 15:
+ raise ValueError("Window size must be between 9 and 15 inclusive")
+ self._client_max_window_bits = value
+
+ @property
+ def server_max_window_bits(self) -> int:
+ return self._server_max_window_bits
+
+ @server_max_window_bits.setter
+ def server_max_window_bits(self, value: int) -> None:
+ if value < 9 or value > 15:
+ raise ValueError("Window size must be between 9 and 15 inclusive")
+ self._server_max_window_bits = value
+
+ def _compressible_opcode(self, opcode: Opcode) -> bool:
+ return opcode in (Opcode.TEXT, Opcode.BINARY, Opcode.CONTINUATION)
+
+ def enabled(self) -> bool:
+ return self._enabled
+
+ def offer(self) -> Union[bool, str]:
+ parameters = [
+ "client_max_window_bits=%d" % self.client_max_window_bits,
+ "server_max_window_bits=%d" % self.server_max_window_bits,
+ ]
+
+ if self.client_no_context_takeover:
+ parameters.append("client_no_context_takeover")
+ if self.server_no_context_takeover:
+ parameters.append("server_no_context_takeover")
+
+ return "; ".join(parameters)
+
+ def finalize(self, offer: str) -> None:
+ bits = [b.strip() for b in offer.split(";")]
+ for bit in bits[1:]:
+ if bit.startswith("client_no_context_takeover"):
+ self.client_no_context_takeover = True
+ elif bit.startswith("server_no_context_takeover"):
+ self.server_no_context_takeover = True
+ elif bit.startswith("client_max_window_bits"):
+ self.client_max_window_bits = int(bit.split("=", 1)[1].strip())
+ elif bit.startswith("server_max_window_bits"):
+ self.server_max_window_bits = int(bit.split("=", 1)[1].strip())
+
+ self._enabled = True
+
+ def _parse_params(self, params: str) -> Tuple[Optional[int], Optional[int]]:
+ client_max_window_bits = None
+ server_max_window_bits = None
+
+ bits = [b.strip() for b in params.split(";")]
+ for bit in bits[1:]:
+ if bit.startswith("client_no_context_takeover"):
+ self.client_no_context_takeover = True
+ elif bit.startswith("server_no_context_takeover"):
+ self.server_no_context_takeover = True
+ elif bit.startswith("client_max_window_bits"):
+ if "=" in bit:
+ client_max_window_bits = int(bit.split("=", 1)[1].strip())
+ else:
+ client_max_window_bits = self.client_max_window_bits
+ elif bit.startswith("server_max_window_bits"):
+ if "=" in bit:
+ server_max_window_bits = int(bit.split("=", 1)[1].strip())
+ else:
+ server_max_window_bits = self.server_max_window_bits
+
+ return client_max_window_bits, server_max_window_bits
+
+ def accept(self, offer: str) -> Union[bool, None, str]:
+ client_max_window_bits, server_max_window_bits = self._parse_params(offer)
+
+ parameters = []
+
+ if self.client_no_context_takeover:
+ parameters.append("client_no_context_takeover")
+ if self.server_no_context_takeover:
+ parameters.append("server_no_context_takeover")
+ try:
+ if client_max_window_bits is not None:
+ parameters.append("client_max_window_bits=%d" % client_max_window_bits)
+ self.client_max_window_bits = client_max_window_bits
+ if server_max_window_bits is not None:
+ parameters.append("server_max_window_bits=%d" % server_max_window_bits)
+ self.server_max_window_bits = server_max_window_bits
+ except ValueError:
+ return None
+ else:
+ self._enabled = True
+ return "; ".join(parameters)
+
+ def frame_inbound_header(
+ self,
+ proto: Union[FrameDecoder, FrameProtocol],
+ opcode: Opcode,
+ rsv: RsvBits,
+ payload_length: int,
+ ) -> Union[CloseReason, RsvBits]:
+ if rsv.rsv1 and opcode.iscontrol():
+ return CloseReason.PROTOCOL_ERROR
+ if rsv.rsv1 and opcode is Opcode.CONTINUATION:
+ return CloseReason.PROTOCOL_ERROR
+
+ self._inbound_is_compressible = self._compressible_opcode(opcode)
+
+ if self._inbound_compressed is None:
+ self._inbound_compressed = rsv.rsv1
+ if self._inbound_compressed:
+ assert self._inbound_is_compressible
+ if proto.client:
+ bits = self.server_max_window_bits
+ else:
+ bits = self.client_max_window_bits
+ if self._decompressor is None:
+ self._decompressor = zlib.decompressobj(-int(bits))
+
+ return RsvBits(True, False, False)
+
+ def frame_inbound_payload_data(
+ self, proto: Union[FrameDecoder, FrameProtocol], data: bytes
+ ) -> Union[bytes, CloseReason]:
+ if not self._inbound_compressed or not self._inbound_is_compressible:
+ return data
+ assert self._decompressor is not None
+
+ try:
+ return self._decompressor.decompress(bytes(data))
+ except zlib.error:
+ return CloseReason.INVALID_FRAME_PAYLOAD_DATA
+
+ def frame_inbound_complete(
+ self, proto: Union[FrameDecoder, FrameProtocol], fin: bool
+ ) -> Union[bytes, CloseReason, None]:
+ if not fin:
+ return None
+ if not self._inbound_is_compressible:
+ self._inbound_compressed = None
+ return None
+ if not self._inbound_compressed:
+ self._inbound_compressed = None
+ return None
+ assert self._decompressor is not None
+
+ try:
+ data = self._decompressor.decompress(b"\x00\x00\xff\xff")
+ data += self._decompressor.flush()
+ except zlib.error:
+ return CloseReason.INVALID_FRAME_PAYLOAD_DATA
+
+ if proto.client:
+ no_context_takeover = self.server_no_context_takeover
+ else:
+ no_context_takeover = self.client_no_context_takeover
+
+ if no_context_takeover:
+ self._decompressor = None
+
+ self._inbound_compressed = None
+
+ return data
+
+ def frame_outbound(
+ self,
+ proto: Union[FrameDecoder, FrameProtocol],
+ opcode: Opcode,
+ rsv: RsvBits,
+ data: bytes,
+ fin: bool,
+ ) -> Tuple[RsvBits, bytes]:
+ if not self._compressible_opcode(opcode):
+ return (rsv, data)
+
+ if opcode is not Opcode.CONTINUATION:
+ rsv = RsvBits(True, *rsv[1:])
+
+ if self._compressor is None:
+ assert opcode is not Opcode.CONTINUATION
+ if proto.client:
+ bits = self.client_max_window_bits
+ else:
+ bits = self.server_max_window_bits
+ self._compressor = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -int(bits)
+ )
+
+ data = self._compressor.compress(bytes(data))
+
+ if fin:
+ data += self._compressor.flush(zlib.Z_SYNC_FLUSH)
+ data = data[:-4]
+
+ if proto.client:
+ no_context_takeover = self.client_no_context_takeover
+ else:
+ no_context_takeover = self.server_no_context_takeover
+
+ if no_context_takeover:
+ self._compressor = None
+
+ return (rsv, data)
+
+ def __repr__(self) -> str:
+ descr = ["client_max_window_bits=%d" % self.client_max_window_bits]
+ if self.client_no_context_takeover:
+ descr.append("client_no_context_takeover")
+ descr.append("server_max_window_bits=%d" % self.server_max_window_bits)
+ if self.server_no_context_takeover:
+ descr.append("server_no_context_takeover")
+
+ return "<{} {}>".format(self.__class__.__name__, "; ".join(descr))
+
+
+#: SUPPORTED_EXTENSIONS maps all supported extension names to their class.
+#: This can be used to iterate all supported extensions of wsproto, instantiate
+#: new extensions based on their name, or check if a given extension is
+#: supported or not.
+SUPPORTED_EXTENSIONS = {PerMessageDeflate.name: PerMessageDeflate}
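+
+
+# Illustrative sketch, not part of wsproto: a server-side negotiation using
+# accept(). The offer string mirrors what a client's offer() could produce.
+def _example_negotiate() -> None:
+    ext = PerMessageDeflate(server_no_context_takeover=True)
+    params = ext.accept("permessage-deflate; client_max_window_bits=12")
+    # accept() enables the extension and returns the response parameters.
+    assert ext.enabled()
+    assert params == "server_no_context_takeover; client_max_window_bits=12"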
diff --git a/.venv/Lib/site-packages/wsproto/frame_protocol.py b/.venv/Lib/site-packages/wsproto/frame_protocol.py
new file mode 100644
index 0000000..d13a769
--- /dev/null
+++ b/.venv/Lib/site-packages/wsproto/frame_protocol.py
@@ -0,0 +1,673 @@
+"""
+wsproto/frame_protocol
+~~~~~~~~~~~~~~~~~~~~~~
+
+WebSocket frame protocol implementation.
+"""
+
+import os
+import struct
+from codecs import getincrementaldecoder, IncrementalDecoder
+from enum import IntEnum
+from typing import Generator, List, NamedTuple, Optional, Tuple, TYPE_CHECKING, Union
+
+if TYPE_CHECKING:
+ from .extensions import Extension # pragma: no cover
+
+
+_XOR_TABLE = [bytes(a ^ b for a in range(256)) for b in range(256)]
+
+
+class XorMaskerSimple:
+ def __init__(self, masking_key: bytes) -> None:
+ self._masking_key = masking_key
+
+ def process(self, data: bytes) -> bytes:
+ if data:
+ data_array = bytearray(data)
+ a, b, c, d = (_XOR_TABLE[n] for n in self._masking_key)
+ data_array[::4] = data_array[::4].translate(a)
+ data_array[1::4] = data_array[1::4].translate(b)
+ data_array[2::4] = data_array[2::4].translate(c)
+ data_array[3::4] = data_array[3::4].translate(d)
+
+ # Rotate the masking key so that the next usage continues
+ # with the next key element, rather than restarting.
+ key_rotation = len(data) % 4
+ self._masking_key = (
+ self._masking_key[key_rotation:] + self._masking_key[:key_rotation]
+ )
+
+ return bytes(data_array)
+ return data
+
+
+class XorMaskerNull:
+ def process(self, data: bytes) -> bytes:
+ return data
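+
+
+# Illustrative sketch, not part of wsproto: XOR masking is an involution, so
+# applying the same 4-byte key twice restores the payload (RFC 6455, 5.3).
+def _example_mask_roundtrip() -> None:
+    key = b"\x01\x02\x03\x04"
+    masked = XorMaskerSimple(key).process(b"hello")
+    assert XorMaskerSimple(key).process(masked) == b"hello"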
+
+
+# RFC6455, Section 5.2 - Base Framing Protocol
+
+# Payload length constants
+PAYLOAD_LENGTH_TWO_BYTE = 126
+PAYLOAD_LENGTH_EIGHT_BYTE = 127
+MAX_PAYLOAD_NORMAL = 125
+MAX_PAYLOAD_TWO_BYTE = 2**16 - 1
+MAX_PAYLOAD_EIGHT_BYTE = 2**64 - 1
+MAX_FRAME_PAYLOAD = MAX_PAYLOAD_EIGHT_BYTE
+
+# MASK and PAYLOAD LEN are packed into a byte
+MASK_MASK = 0x80
+PAYLOAD_LEN_MASK = 0x7F
+
+# FIN, RSV[123] and OPCODE are packed into a single byte
+FIN_MASK = 0x80
+RSV1_MASK = 0x40
+RSV2_MASK = 0x20
+RSV3_MASK = 0x10
+OPCODE_MASK = 0x0F
+
+
+class Opcode(IntEnum):
+ """
+ RFC 6455, Section 5.2 - Base Framing Protocol
+ """
+
+ #: Continuation frame
+ CONTINUATION = 0x0
+
+ #: Text message
+ TEXT = 0x1
+
+ #: Binary message
+ BINARY = 0x2
+
+ #: Close frame
+ CLOSE = 0x8
+
+ #: Ping frame
+ PING = 0x9
+
+ #: Pong frame
+ PONG = 0xA
+
+ def iscontrol(self) -> bool:
+ return bool(self & 0x08)
+
+
+class CloseReason(IntEnum):
+ """
+ RFC 6455, Section 7.4.1 - Defined Status Codes
+ """
+
+ #: indicates a normal closure, meaning that the purpose for
+ #: which the connection was established has been fulfilled.
+ NORMAL_CLOSURE = 1000
+
+ #: indicates that an endpoint is "going away", such as a server
+ #: going down or a browser having navigated away from a page.
+ GOING_AWAY = 1001
+
+ #: indicates that an endpoint is terminating the connection due
+ #: to a protocol error.
+ PROTOCOL_ERROR = 1002
+
+ #: indicates that an endpoint is terminating the connection
+ #: because it has received a type of data it cannot accept (e.g., an
+ #: endpoint that understands only text data MAY send this if it
+ #: receives a binary message).
+ UNSUPPORTED_DATA = 1003
+
+ #: Reserved. The specific meaning might be defined in the future.
+ # DON'T DEFINE THIS: RESERVED_1004 = 1004
+
+ #: is a reserved value and MUST NOT be set as a status code in a
+ #: Close control frame by an endpoint. It is designated for use in
+ #: applications expecting a status code to indicate that no status
+ #: code was actually present.
+ NO_STATUS_RCVD = 1005
+
+ #: is a reserved value and MUST NOT be set as a status code in a
+ #: Close control frame by an endpoint. It is designated for use in
+ #: applications expecting a status code to indicate that the
+ #: connection was closed abnormally, e.g., without sending or
+ #: receiving a Close control frame.
+ ABNORMAL_CLOSURE = 1006
+
+ #: indicates that an endpoint is terminating the connection
+ #: because it has received data within a message that was not
+ #: consistent with the type of the message (e.g., non-UTF-8 [RFC3629]
+ #: data within a text message).
+ INVALID_FRAME_PAYLOAD_DATA = 1007
+
+ #: indicates that an endpoint is terminating the connection
+ #: because it has received a message that violates its policy. This
+ #: is a generic status code that can be returned when there is no
+ #: other more suitable status code (e.g., 1003 or 1009) or if there
+ #: is a need to hide specific details about the policy.
+ POLICY_VIOLATION = 1008
+
+ #: indicates that an endpoint is terminating the connection
+ #: because it has received a message that is too big for it to
+ #: process.
+ MESSAGE_TOO_BIG = 1009
+
+ #: indicates that an endpoint (client) is terminating the
+ #: connection because it has expected the server to negotiate one or
+ #: more extension, but the server didn't return them in the response
+ #: message of the WebSocket handshake. The list of extensions that
+ #: are needed SHOULD appear in the /reason/ part of the Close frame.
+ #: Note that this status code is not used by the server, because it
+ #: can fail the WebSocket handshake instead.
+ MANDATORY_EXT = 1010
+
+ #: indicates that a server is terminating the connection because
+ #: it encountered an unexpected condition that prevented it from
+ #: fulfilling the request.
+ INTERNAL_ERROR = 1011
+
+ #: Server/service is restarting
+ #: (not part of RFC6455)
+ SERVICE_RESTART = 1012
+
+ #: Temporary server condition forced blocking client's request
+ #: (not part of RFC6455)
+ TRY_AGAIN_LATER = 1013
+
+ #: is a reserved value and MUST NOT be set as a status code in a
+ #: Close control frame by an endpoint. It is designated for use in
+ #: applications expecting a status code to indicate that the
+ #: connection was closed due to a failure to perform a TLS handshake
+ #: (e.g., the server certificate can't be verified).
+ TLS_HANDSHAKE_FAILED = 1015
+
+
+# RFC 6455, Section 7.4.1 - Defined Status Codes
+LOCAL_ONLY_CLOSE_REASONS = (
+ CloseReason.NO_STATUS_RCVD,
+ CloseReason.ABNORMAL_CLOSURE,
+ CloseReason.TLS_HANDSHAKE_FAILED,
+)
+
+
+# RFC 6455, Section 7.4.2 - Status Code Ranges
+MIN_CLOSE_REASON = 1000
+MIN_PROTOCOL_CLOSE_REASON = 1000
+MAX_PROTOCOL_CLOSE_REASON = 2999
+MIN_LIBRARY_CLOSE_REASON = 3000
+MAX_LIBRARY_CLOSE_REASON = 3999
+MIN_PRIVATE_CLOSE_REASON = 4000
+MAX_PRIVATE_CLOSE_REASON = 4999
+MAX_CLOSE_REASON = 4999
+
+
+NULL_MASK = struct.pack("!I", 0)
+
+
+class ParseFailed(Exception):
+ def __init__(
+ self, msg: str, code: CloseReason = CloseReason.PROTOCOL_ERROR
+ ) -> None:
+ super().__init__(msg)
+ self.code = code
+
+
+class RsvBits(NamedTuple):
+ rsv1: bool
+ rsv2: bool
+ rsv3: bool
+
+
+class Header(NamedTuple):
+ fin: bool
+ rsv: RsvBits
+ opcode: Opcode
+ payload_len: int
+ masking_key: Optional[bytes]
+
+
+class Frame(NamedTuple):
+ opcode: Opcode
+ payload: Union[bytes, str, Tuple[int, str]]
+ frame_finished: bool
+ message_finished: bool
+
+
+def _truncate_utf8(data: bytes, nbytes: int) -> bytes:
+ if len(data) <= nbytes:
+ return data
+
+ # Truncate
+ data = data[:nbytes]
+ # But we might have cut a codepoint in half, in which case we want to
+ # discard the partial character so the data is at least
+ # well-formed. This is a little inefficient since it processes the
+ # whole message twice when in theory we could just peek at the last
+ # few characters, but since this is only used for close messages (max
+ # length = 125 bytes) it really doesn't matter.
+ data = data.decode("utf-8", errors="ignore").encode("utf-8")
+ return data
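+
+
+# Illustrative sketch, not part of wsproto: truncating mid-codepoint drops the
+# partial character instead of emitting invalid UTF-8.
+def _example_truncate() -> None:
+    data = "héllo".encode("utf-8")          # b"h\xc3\xa9llo", six bytes
+    assert _truncate_utf8(data, 2) == b"h"  # the split two-byte "é" is dropped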
+
+
+class Buffer:
+ def __init__(self, initial_bytes: Optional[bytes] = None) -> None:
+ self.buffer = bytearray()
+ self.bytes_used = 0
+ if initial_bytes:
+ self.feed(initial_bytes)
+
+ def feed(self, new_bytes: bytes) -> None:
+ self.buffer += new_bytes
+
+ def consume_at_most(self, nbytes: int) -> bytes:
+ if not nbytes:
+ return bytearray()
+
+ data = self.buffer[self.bytes_used : self.bytes_used + nbytes]
+ self.bytes_used += len(data)
+ return data
+
+ def consume_exactly(self, nbytes: int) -> Optional[bytes]:
+ if len(self.buffer) - self.bytes_used < nbytes:
+ return None
+
+ return self.consume_at_most(nbytes)
+
+ def commit(self) -> None:
+ # In CPython 3.4+, del[:n] is amortized O(n), *not* quadratic
+ del self.buffer[: self.bytes_used]
+ self.bytes_used = 0
+
+ def rollback(self) -> None:
+ self.bytes_used = 0
+
+ def __len__(self) -> int:
+ return len(self.buffer)
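+
+
+# Illustrative sketch, not part of wsproto: consume_exactly() returns None
+# until enough bytes have arrived; rollback() un-reads a failed parse attempt
+# and commit() discards bytes once a frame has been fully parsed.
+def _example_buffer() -> None:
+    buf = Buffer(b"ab")
+    assert buf.consume_exactly(4) is None  # not enough data yet
+    buf.rollback()
+    buf.feed(b"cd")
+    assert bytes(buf.consume_exactly(4)) == b"abcd"
+    buf.commit()
+    assert len(buf) == 0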
+
+
+class MessageDecoder:
+ def __init__(self) -> None:
+ self.opcode: Optional[Opcode] = None
+ self.decoder: Optional[IncrementalDecoder] = None
+
+ def process_frame(self, frame: Frame) -> Frame:
+ assert not frame.opcode.iscontrol()
+
+ if self.opcode is None:
+ if frame.opcode is Opcode.CONTINUATION:
+ raise ParseFailed("unexpected CONTINUATION")
+ self.opcode = frame.opcode
+ elif frame.opcode is not Opcode.CONTINUATION:
+ raise ParseFailed("expected CONTINUATION, got %r" % frame.opcode)
+
+ if frame.opcode is Opcode.TEXT:
+ self.decoder = getincrementaldecoder("utf-8")()
+
+ finished = frame.frame_finished and frame.message_finished
+
+ if self.decoder is None:
+ data = frame.payload
+ else:
+ assert isinstance(frame.payload, (bytes, bytearray))
+ try:
+ data = self.decoder.decode(frame.payload, finished)
+ except UnicodeDecodeError as exc:
+ raise ParseFailed(str(exc), CloseReason.INVALID_FRAME_PAYLOAD_DATA)
+
+ frame = Frame(self.opcode, data, frame.frame_finished, finished)
+
+ if finished:
+ self.opcode = None
+ self.decoder = None
+
+ return frame
+
+
+class FrameDecoder:
+ def __init__(
+ self, client: bool, extensions: Optional[List["Extension"]] = None
+ ) -> None:
+ self.client = client
+ self.extensions = extensions or []
+
+ self.buffer = Buffer()
+
+ self.header: Optional[Header] = None
+ self.effective_opcode: Optional[Opcode] = None
+ self.masker: Union[None, XorMaskerNull, XorMaskerSimple] = None
+ self.payload_required = 0
+ self.payload_consumed = 0
+
+ def receive_bytes(self, data: bytes) -> None:
+ self.buffer.feed(data)
+
+ def process_buffer(self) -> Optional[Frame]:
+ if not self.header:
+ if not self.parse_header():
+ return None
+ # parse_header() sets these.
+ assert self.header is not None
+ assert self.masker is not None
+ assert self.effective_opcode is not None
+
+ if len(self.buffer) < self.payload_required:
+ return None
+
+ payload_remaining = self.header.payload_len - self.payload_consumed
+ payload = self.buffer.consume_at_most(payload_remaining)
+ if not payload and self.header.payload_len > 0:
+ return None
+ self.buffer.commit()
+
+ self.payload_consumed += len(payload)
+ finished = self.payload_consumed == self.header.payload_len
+
+ payload = self.masker.process(payload)
+
+ for extension in self.extensions:
+ payload_ = extension.frame_inbound_payload_data(self, payload)
+ if isinstance(payload_, CloseReason):
+ raise ParseFailed("error in extension", payload_)
+ payload = payload_
+
+ if finished:
+ final = bytearray()
+ for extension in self.extensions:
+ result = extension.frame_inbound_complete(self, self.header.fin)
+ if isinstance(result, CloseReason):
+ raise ParseFailed("error in extension", result)
+ if result is not None:
+ final += result
+ payload += final
+
+ frame = Frame(self.effective_opcode, payload, finished, self.header.fin)
+
+ if finished:
+ self.header = None
+ self.effective_opcode = None
+ self.masker = None
+ else:
+ self.effective_opcode = Opcode.CONTINUATION
+
+ return frame
+
+ def parse_header(self) -> bool:
+ data = self.buffer.consume_exactly(2)
+ if data is None:
+ self.buffer.rollback()
+ return False
+
+ fin = bool(data[0] & FIN_MASK)
+ rsv = RsvBits(
+ bool(data[0] & RSV1_MASK),
+ bool(data[0] & RSV2_MASK),
+ bool(data[0] & RSV3_MASK),
+ )
+ opcode = data[0] & OPCODE_MASK
+ try:
+ opcode = Opcode(opcode)
+ except ValueError:
+ raise ParseFailed(f"Invalid opcode {opcode:#x}")
+
+ if opcode.iscontrol() and not fin:
+ raise ParseFailed("Invalid attempt to fragment control frame")
+
+ has_mask = bool(data[1] & MASK_MASK)
+ payload_len_short = data[1] & PAYLOAD_LEN_MASK
+ payload_len = self.parse_extended_payload_length(opcode, payload_len_short)
+ if payload_len is None:
+ self.buffer.rollback()
+ return False
+
+ self.extension_processing(opcode, rsv, payload_len)
+
+ if has_mask and self.client:
+ raise ParseFailed("client received unexpected masked frame")
+ if not has_mask and not self.client:
+ raise ParseFailed("server received unexpected unmasked frame")
+ if has_mask:
+ masking_key = self.buffer.consume_exactly(4)
+ if masking_key is None:
+ self.buffer.rollback()
+ return False
+ self.masker = XorMaskerSimple(masking_key)
+ else:
+ self.masker = XorMaskerNull()
+
+ self.buffer.commit()
+ self.header = Header(fin, rsv, opcode, payload_len, None)
+ self.effective_opcode = self.header.opcode
+ if self.header.opcode.iscontrol():
+ self.payload_required = payload_len
+ else:
+ self.payload_required = 0
+ self.payload_consumed = 0
+ return True
+
+ def parse_extended_payload_length(
+ self, opcode: Opcode, payload_len: int
+ ) -> Optional[int]:
+ if opcode.iscontrol() and payload_len > MAX_PAYLOAD_NORMAL:
+ raise ParseFailed("Control frame with payload len > 125")
+ if payload_len == PAYLOAD_LENGTH_TWO_BYTE:
+ data = self.buffer.consume_exactly(2)
+ if data is None:
+ return None
+ (payload_len,) = struct.unpack("!H", data)
+ if payload_len <= MAX_PAYLOAD_NORMAL:
+ raise ParseFailed(
+ "Payload length used 2 bytes when 1 would have sufficed"
+ )
+ elif payload_len == PAYLOAD_LENGTH_EIGHT_BYTE:
+ data = self.buffer.consume_exactly(8)
+ if data is None:
+ return None
+ (payload_len,) = struct.unpack("!Q", data)
+ if payload_len <= MAX_PAYLOAD_TWO_BYTE:
+ raise ParseFailed(
+ "Payload length used 8 bytes when 2 would have sufficed"
+ )
+ if payload_len >> 63:
+ # I'm not sure why this is illegal, but that's what the RFC
+ # says, so...
+ raise ParseFailed("8-byte payload length with non-zero MSB")
+
+ return payload_len
+
+ def extension_processing(
+ self, opcode: Opcode, rsv: RsvBits, payload_len: int
+ ) -> None:
+ rsv_used = [False, False, False]
+ for extension in self.extensions:
+ result = extension.frame_inbound_header(self, opcode, rsv, payload_len)
+ if isinstance(result, CloseReason):
+ raise ParseFailed("error in extension", result)
+ for bit, used in enumerate(result):
+ if used:
+ rsv_used[bit] = True
+ for expected, found in zip(rsv_used, rsv):
+ if found and not expected:
+ raise ParseFailed("Reserved bit set unexpectedly")
+
+
+class FrameProtocol:
+ def __init__(self, client: bool, extensions: List["Extension"]) -> None:
+ self.client = client
+ self.extensions = [ext for ext in extensions if ext.enabled()]
+
+ # Global state
+ self._frame_decoder = FrameDecoder(self.client, self.extensions)
+ self._message_decoder = MessageDecoder()
+ self._parse_more = self._parse_more_gen()
+
+ self._outbound_opcode: Optional[Opcode] = None
+
+ def _process_close(self, frame: Frame) -> Frame:
+ data = frame.payload
+ assert isinstance(data, (bytes, bytearray))
+
+ if not data:
+ # "If this Close control frame contains no status code, _The
+ # WebSocket Connection Close Code_ is considered to be 1005"
+ data = (CloseReason.NO_STATUS_RCVD, "")
+ elif len(data) == 1:
+ raise ParseFailed("CLOSE with 1 byte payload")
+ else:
+ (code,) = struct.unpack("!H", data[:2])
+ if code < MIN_CLOSE_REASON or code > MAX_CLOSE_REASON:
+ raise ParseFailed("CLOSE with invalid code")
+ try:
+ code = CloseReason(code)
+ except ValueError:
+ pass
+ if code in LOCAL_ONLY_CLOSE_REASONS:
+ raise ParseFailed("remote CLOSE with local-only reason")
+ if not isinstance(code, CloseReason) and code <= MAX_PROTOCOL_CLOSE_REASON:
+ raise ParseFailed("CLOSE with unknown reserved code")
+ try:
+ reason = data[2:].decode("utf-8")
+ except UnicodeDecodeError as exc:
+ raise ParseFailed(
+ "Error decoding CLOSE reason: " + str(exc),
+ CloseReason.INVALID_FRAME_PAYLOAD_DATA,
+ )
+ data = (code, reason)
+
+ return Frame(frame.opcode, data, frame.frame_finished, frame.message_finished)
+
+ def _parse_more_gen(self) -> Generator[Optional[Frame], None, None]:
+ # Consume as much as we can from self._buffer, yielding events, and
+ # then yield None when we need more data. Or raise ParseFailed.
+
+ # XX FIXME this should probably be refactored so that we never see
+ # disabled extensions in the first place...
+ self.extensions = [ext for ext in self.extensions if ext.enabled()]
+ closed = False
+
+ while not closed:
+ frame = self._frame_decoder.process_buffer()
+
+ if frame is not None:
+ if not frame.opcode.iscontrol():
+ frame = self._message_decoder.process_frame(frame)
+ elif frame.opcode == Opcode.CLOSE:
+ frame = self._process_close(frame)
+ closed = True
+
+ yield frame
+
+ def receive_bytes(self, data: bytes) -> None:
+ self._frame_decoder.receive_bytes(data)
+
+ def received_frames(self) -> Generator[Frame, None, None]:
+ for event in self._parse_more:
+ if event is None:
+ break
+ else:
+ yield event
+
+ def close(self, code: Optional[int] = None, reason: Optional[str] = None) -> bytes:
+ payload = bytearray()
+ if code is CloseReason.NO_STATUS_RCVD:
+ code = None
+ if code is None and reason:
+ raise TypeError("cannot specify a reason without a code")
+ if code in LOCAL_ONLY_CLOSE_REASONS:
+ code = CloseReason.NORMAL_CLOSURE
+ if code is not None:
+ payload += bytearray(struct.pack("!H", code))
+ if reason is not None:
+ payload += _truncate_utf8(
+ reason.encode("utf-8"), MAX_PAYLOAD_NORMAL - 2
+ )
+
+ return self._serialize_frame(Opcode.CLOSE, payload)
+
+ def ping(self, payload: bytes = b"") -> bytes:
+ return self._serialize_frame(Opcode.PING, payload)
+
+ def pong(self, payload: bytes = b"") -> bytes:
+ return self._serialize_frame(Opcode.PONG, payload)
+
+ def send_data(
+ self, payload: Union[bytes, bytearray, str] = b"", fin: bool = True
+ ) -> bytes:
+ if isinstance(payload, (bytes, bytearray, memoryview)):
+ opcode = Opcode.BINARY
+ elif isinstance(payload, str):
+ opcode = Opcode.TEXT
+ payload = payload.encode("utf-8")
+ else:
+ raise ValueError("Must provide bytes or text")
+
+ if self._outbound_opcode is None:
+ self._outbound_opcode = opcode
+ elif self._outbound_opcode is not opcode:
+ raise TypeError("Data type mismatch inside message")
+ else:
+ opcode = Opcode.CONTINUATION
+
+ if fin:
+ self._outbound_opcode = None
+
+ return self._serialize_frame(opcode, payload, fin)
+
+ def _make_fin_rsv_opcode(self, fin: bool, rsv: RsvBits, opcode: Opcode) -> int:
+ fin_bits = int(fin) << 7
+ rsv_bits = (int(rsv.rsv1) << 6) + (int(rsv.rsv2) << 5) + (int(rsv.rsv3) << 4)
+ opcode_bits = int(opcode)
+
+ return fin_bits | rsv_bits | opcode_bits
+
+ def _serialize_frame(
+ self, opcode: Opcode, payload: bytes = b"", fin: bool = True
+ ) -> bytes:
+ rsv = RsvBits(False, False, False)
+ for extension in reversed(self.extensions):
+ rsv, payload = extension.frame_outbound(self, opcode, rsv, payload, fin)
+
+ fin_rsv_opcode = self._make_fin_rsv_opcode(fin, rsv, opcode)
+
+ payload_length = len(payload)
+ quad_payload = False
+ if payload_length <= MAX_PAYLOAD_NORMAL:
+ first_payload = payload_length
+ second_payload = None
+ elif payload_length <= MAX_PAYLOAD_TWO_BYTE:
+ first_payload = PAYLOAD_LENGTH_TWO_BYTE
+ second_payload = payload_length
+ else:
+ first_payload = PAYLOAD_LENGTH_EIGHT_BYTE
+ second_payload = payload_length
+ quad_payload = True
+
+ if self.client:
+ first_payload |= 1 << 7
+
+ header = bytearray([fin_rsv_opcode, first_payload])
+ if second_payload is not None:
+ if opcode.iscontrol():
+ raise ValueError("payload too long for control frame")
+ if quad_payload:
+ header += bytearray(struct.pack("!Q", second_payload))
+ else:
+ header += bytearray(struct.pack("!H", second_payload))
+
+ if self.client:
+ # "The masking key is a 32-bit value chosen at random by the
+ # client. When preparing a masked frame, the client MUST pick a
+ # fresh masking key from the set of allowed 32-bit values. The
+ # masking key needs to be unpredictable; thus, the masking key
+ # MUST be derived from a strong source of entropy, and the masking
+ # key for a given frame MUST NOT make it simple for a server/proxy
+ # to predict the masking key for a subsequent frame. The
+ # unpredictability of the masking key is essential to prevent
+ # authors of malicious applications from selecting the bytes that
+ # appear on the wire."
+ # -- https://tools.ietf.org/html/rfc6455#section-5.3
+ masking_key = os.urandom(4)
+ masker = XorMaskerSimple(masking_key)
+ return header + masking_key + masker.process(payload)
+
+ return header + payload
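+
+
+# ---------------------------------------------------------------------------
+# Illustrative sketch, not part of wsproto: a client FrameProtocol serializes
+# a masked TEXT frame and a server-side FrameProtocol parses it back.
+def _example_frame_roundtrip() -> None:
+    client = FrameProtocol(client=True, extensions=[])
+    server = FrameProtocol(client=False, extensions=[])
+    server.receive_bytes(client.send_data("hi", fin=True))
+    frame = next(server.received_frames())
+    assert frame.opcode is Opcode.TEXT and frame.payload == "hi"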
diff --git a/.venv/Lib/site-packages/wsproto/handshake.py b/.venv/Lib/site-packages/wsproto/handshake.py
new file mode 100644
index 0000000..c456939
--- /dev/null
+++ b/.venv/Lib/site-packages/wsproto/handshake.py
@@ -0,0 +1,491 @@
+"""
+wsproto/handshake
+~~~~~~~~~~~~~~~~~~
+
+An implementation of WebSocket handshakes.
+"""
+from collections import deque
+from typing import (
+ cast,
+ Deque,
+ Dict,
+ Generator,
+ Iterable,
+ List,
+ Optional,
+ Sequence,
+ Union,
+)
+
+import h11
+
+from .connection import Connection, ConnectionState, ConnectionType
+from .events import AcceptConnection, Event, RejectConnection, RejectData, Request
+from .extensions import Extension
+from .typing import Headers
+from .utilities import (
+ generate_accept_token,
+ generate_nonce,
+ LocalProtocolError,
+ normed_header_dict,
+ RemoteProtocolError,
+ split_comma_header,
+)
+
+# RFC6455, Section 4.2.1/6 - Reading the Client's Opening Handshake
+WEBSOCKET_VERSION = b"13"
+
+
+class H11Handshake:
+ """A Handshake implementation for HTTP/1.1 connections."""
+
+ def __init__(self, connection_type: ConnectionType) -> None:
+ self.client = connection_type is ConnectionType.CLIENT
+ self._state = ConnectionState.CONNECTING
+
+ if self.client:
+ self._h11_connection = h11.Connection(h11.CLIENT)
+ else:
+ self._h11_connection = h11.Connection(h11.SERVER)
+
+ self._connection: Optional[Connection] = None
+ self._events: Deque[Event] = deque()
+ self._initiating_request: Optional[Request] = None
+ self._nonce: Optional[bytes] = None
+
+ @property
+ def state(self) -> ConnectionState:
+ return self._state
+
+ @property
+ def connection(self) -> Optional[Connection]:
+        """Return the established connection.
+
+        This returns the connection, or None if the connection has
+        not yet been established.
+
+        :rtype: Optional[Connection]
+        """
+ return self._connection
+
+ def initiate_upgrade_connection(
+ self, headers: Headers, path: Union[bytes, str]
+ ) -> None:
+ """Initiate an upgrade connection.
+
+        This should be used if the request has already been received and
+ parsed.
+
+ :param list headers: HTTP headers represented as a list of 2-tuples.
+ :param str path: A URL path.
+ """
+ if self.client:
+ raise LocalProtocolError(
+ "Cannot initiate an upgrade connection when acting as the client"
+ )
+ upgrade_request = h11.Request(method=b"GET", target=path, headers=headers)
+ h11_client = h11.Connection(h11.CLIENT)
+ self.receive_data(h11_client.send(upgrade_request))
+
+ def send(self, event: Event) -> bytes:
+ """Send an event to the remote.
+
+ This will return the bytes to send based on the event or raise
+ a LocalProtocolError if the event is not valid given the
+ state.
+
+ :returns: Data to send to the WebSocket peer.
+ :rtype: bytes
+ """
+ data = b""
+ if isinstance(event, Request):
+ data += self._initiate_connection(event)
+ elif isinstance(event, AcceptConnection):
+ data += self._accept(event)
+ elif isinstance(event, RejectConnection):
+ data += self._reject(event)
+ elif isinstance(event, RejectData):
+ data += self._send_reject_data(event)
+ else:
+ raise LocalProtocolError(
+ f"Event {event} cannot be sent during the handshake"
+ )
+ return data
+
+ def receive_data(self, data: Optional[bytes]) -> None:
+ """Receive data from the remote.
+
+ A list of events that the remote peer triggered by sending
+ this data can be retrieved with :meth:`events`.
+
+ :param bytes data: Data received from the WebSocket peer.
+ """
+ self._h11_connection.receive_data(data or b"")
+ while True:
+ try:
+ event = self._h11_connection.next_event()
+ except h11.RemoteProtocolError:
+ raise RemoteProtocolError(
+ "Bad HTTP message", event_hint=RejectConnection()
+ )
+ if (
+ isinstance(event, h11.ConnectionClosed)
+ or event is h11.NEED_DATA
+ or event is h11.PAUSED
+ ):
+ break
+
+ if self.client:
+ if isinstance(event, h11.InformationalResponse):
+ if event.status_code == 101:
+ self._events.append(self._establish_client_connection(event))
+ else:
+ self._events.append(
+ RejectConnection(
+ headers=list(event.headers),
+ status_code=event.status_code,
+ has_body=False,
+ )
+ )
+ self._state = ConnectionState.CLOSED
+ elif isinstance(event, h11.Response):
+ self._state = ConnectionState.REJECTING
+ self._events.append(
+ RejectConnection(
+ headers=list(event.headers),
+ status_code=event.status_code,
+ has_body=True,
+ )
+ )
+ elif isinstance(event, h11.Data):
+ self._events.append(
+ RejectData(data=event.data, body_finished=False)
+ )
+ elif isinstance(event, h11.EndOfMessage):
+ self._events.append(RejectData(data=b"", body_finished=True))
+ self._state = ConnectionState.CLOSED
+ else:
+ if isinstance(event, h11.Request):
+ self._events.append(self._process_connection_request(event))
+
+ def events(self) -> Generator[Event, None, None]:
+ """Return a generator that provides any events that have been generated
+ by protocol activity.
+
+        :returns: a generator that yields wsproto events.
+ """
+ while self._events:
+ yield self._events.popleft()
+
+ # Server mode methods
+
+ def _process_connection_request( # noqa: MC0001
+ self, event: h11.Request
+ ) -> Request:
+ if event.method != b"GET":
+ raise RemoteProtocolError(
+ "Request method must be GET", event_hint=RejectConnection()
+ )
+ connection_tokens = None
+ extensions: List[str] = []
+ host = None
+ key = None
+ subprotocols: List[str] = []
+ upgrade = b""
+ version = None
+ headers: Headers = []
+ for name, value in event.headers:
+ name = name.lower()
+ if name == b"connection":
+ connection_tokens = split_comma_header(value)
+ elif name == b"host":
+ host = value.decode("idna")
+ continue # Skip appending to headers
+ elif name == b"sec-websocket-extensions":
+ extensions.extend(split_comma_header(value))
+ continue # Skip appending to headers
+ elif name == b"sec-websocket-key":
+ key = value
+ elif name == b"sec-websocket-protocol":
+ subprotocols.extend(split_comma_header(value))
+ continue # Skip appending to headers
+ elif name == b"sec-websocket-version":
+ version = value
+ elif name == b"upgrade":
+ upgrade = value
+ headers.append((name, value))
+ if connection_tokens is None or not any(
+ token.lower() == "upgrade" for token in connection_tokens
+ ):
+ raise RemoteProtocolError(
+ "Missing header, 'Connection: Upgrade'", event_hint=RejectConnection()
+ )
+ if version != WEBSOCKET_VERSION:
+ raise RemoteProtocolError(
+ "Missing header, 'Sec-WebSocket-Version'",
+ event_hint=RejectConnection(
+ headers=[(b"Sec-WebSocket-Version", WEBSOCKET_VERSION)],
+ status_code=426 if version else 400,
+ ),
+ )
+ if key is None:
+ raise RemoteProtocolError(
+ "Missing header, 'Sec-WebSocket-Key'", event_hint=RejectConnection()
+ )
+ if upgrade.lower() != b"websocket":
+ raise RemoteProtocolError(
+ "Missing header, 'Upgrade: WebSocket'", event_hint=RejectConnection()
+ )
+ if host is None:
+ raise RemoteProtocolError(
+ "Missing header, 'Host'", event_hint=RejectConnection()
+ )
+
+ self._initiating_request = Request(
+ extensions=extensions,
+ extra_headers=headers,
+ host=host,
+ subprotocols=subprotocols,
+ target=event.target.decode("ascii"),
+ )
+ return self._initiating_request
+
+ def _accept(self, event: AcceptConnection) -> bytes:
+ # _accept is always called after _process_connection_request.
+ assert self._initiating_request is not None
+ request_headers = normed_header_dict(self._initiating_request.extra_headers)
+
+ nonce = request_headers[b"sec-websocket-key"]
+ accept_token = generate_accept_token(nonce)
+
+ headers = [
+ (b"Upgrade", b"WebSocket"),
+ (b"Connection", b"Upgrade"),
+ (b"Sec-WebSocket-Accept", accept_token),
+ ]
+
+ if event.subprotocol is not None:
+ if event.subprotocol not in self._initiating_request.subprotocols:
+ raise LocalProtocolError(f"unexpected subprotocol {event.subprotocol}")
+ headers.append(
+ (b"Sec-WebSocket-Protocol", event.subprotocol.encode("ascii"))
+ )
+
+ if event.extensions:
+ accepts = server_extensions_handshake(
+ cast(Sequence[str], self._initiating_request.extensions),
+ event.extensions,
+ )
+ if accepts:
+ headers.append((b"Sec-WebSocket-Extensions", accepts))
+
+ response = h11.InformationalResponse(
+ status_code=101, headers=headers + event.extra_headers
+ )
+ self._connection = Connection(
+ ConnectionType.CLIENT if self.client else ConnectionType.SERVER,
+ event.extensions,
+ )
+ self._state = ConnectionState.OPEN
+ return self._h11_connection.send(response) or b""
+
+ def _reject(self, event: RejectConnection) -> bytes:
+ if self.state != ConnectionState.CONNECTING:
+ raise LocalProtocolError(
+ "Connection cannot be rejected in state %s" % self.state
+ )
+
+ headers = list(event.headers)
+ if not event.has_body:
+ headers.append((b"content-length", b"0"))
+ response = h11.Response(status_code=event.status_code, headers=headers)
+ data = self._h11_connection.send(response) or b""
+ self._state = ConnectionState.REJECTING
+ if not event.has_body:
+ data += self._h11_connection.send(h11.EndOfMessage()) or b""
+ self._state = ConnectionState.CLOSED
+ return data
+
+ def _send_reject_data(self, event: RejectData) -> bytes:
+ if self.state != ConnectionState.REJECTING:
+ raise LocalProtocolError(
+ f"Cannot send rejection data in state {self.state}"
+ )
+
+ data = self._h11_connection.send(h11.Data(data=event.data)) or b""
+ if event.body_finished:
+ data += self._h11_connection.send(h11.EndOfMessage()) or b""
+ self._state = ConnectionState.CLOSED
+ return data
+
+ # Client mode methods
+
+ def _initiate_connection(self, request: Request) -> bytes:
+ self._initiating_request = request
+ self._nonce = generate_nonce()
+
+ headers = [
+ (b"Host", request.host.encode("idna")),
+ (b"Upgrade", b"WebSocket"),
+ (b"Connection", b"Upgrade"),
+ (b"Sec-WebSocket-Key", self._nonce),
+ (b"Sec-WebSocket-Version", WEBSOCKET_VERSION),
+ ]
+
+ if request.subprotocols:
+ headers.append(
+ (
+ b"Sec-WebSocket-Protocol",
+ (", ".join(request.subprotocols)).encode("ascii"),
+ )
+ )
+
+ if request.extensions:
+ offers: Dict[str, Union[str, bool]] = {}
+ for e in request.extensions:
+ assert isinstance(e, Extension)
+ offers[e.name] = e.offer()
+ extensions = []
+ for name, params in offers.items():
+ bname = name.encode("ascii")
+ if isinstance(params, bool):
+ if params:
+ extensions.append(bname)
+ else:
+ extensions.append(b"%s; %s" % (bname, params.encode("ascii")))
+ if extensions:
+ headers.append((b"Sec-WebSocket-Extensions", b", ".join(extensions)))
+
+ upgrade = h11.Request(
+ method=b"GET",
+ target=request.target.encode("ascii"),
+ headers=headers + request.extra_headers,
+ )
+ return self._h11_connection.send(upgrade) or b""
+
+ def _establish_client_connection(
+ self, event: h11.InformationalResponse
+ ) -> AcceptConnection: # noqa: MC0001
+ # _establish_client_connection is always called after _initiate_connection.
+ assert self._initiating_request is not None
+ assert self._nonce is not None
+
+ accept = None
+ connection_tokens = None
+ accepts: List[str] = []
+ subprotocol = None
+ upgrade = b""
+ headers: Headers = []
+ for name, value in event.headers:
+ name = name.lower()
+ if name == b"connection":
+ connection_tokens = split_comma_header(value)
+ continue # Skip appending to headers
+ elif name == b"sec-websocket-extensions":
+ accepts = split_comma_header(value)
+ continue # Skip appending to headers
+ elif name == b"sec-websocket-accept":
+ accept = value
+ continue # Skip appending to headers
+ elif name == b"sec-websocket-protocol":
+ subprotocol = value.decode("ascii")
+ continue # Skip appending to headers
+ elif name == b"upgrade":
+ upgrade = value
+ continue # Skip appending to headers
+ headers.append((name, value))
+
+ if connection_tokens is None or not any(
+ token.lower() == "upgrade" for token in connection_tokens
+ ):
+ raise RemoteProtocolError(
+ "Missing header, 'Connection: Upgrade'", event_hint=RejectConnection()
+ )
+ if upgrade.lower() != b"websocket":
+ raise RemoteProtocolError(
+ "Missing header, 'Upgrade: WebSocket'", event_hint=RejectConnection()
+ )
+ accept_token = generate_accept_token(self._nonce)
+ if accept != accept_token:
+ raise RemoteProtocolError("Bad accept token", event_hint=RejectConnection())
+ if subprotocol is not None:
+ if subprotocol not in self._initiating_request.subprotocols:
+ raise RemoteProtocolError(
+ f"unrecognized subprotocol {subprotocol}",
+ event_hint=RejectConnection(),
+ )
+ extensions = client_extensions_handshake(
+ accepts, cast(Sequence[Extension], self._initiating_request.extensions)
+ )
+
+ self._connection = Connection(
+ ConnectionType.CLIENT if self.client else ConnectionType.SERVER,
+ extensions,
+ self._h11_connection.trailing_data[0],
+ )
+ self._state = ConnectionState.OPEN
+ return AcceptConnection(
+ extensions=extensions, extra_headers=headers, subprotocol=subprotocol
+ )
+
+ def __repr__(self) -> str:
+ return "{}(client={}, state={})".format(
+ self.__class__.__name__, self.client, self.state
+ )
+
+
+def server_extensions_handshake(
+ requested: Iterable[str], supported: List[Extension]
+) -> Optional[bytes]:
+    """Agree on the extensions to use, returning an appropriate header value.
+
+    This returns None if there are no agreed extensions.
+ """
+ accepts: Dict[str, Union[bool, bytes]] = {}
+ for offer in requested:
+ name = offer.split(";", 1)[0].strip()
+ for extension in supported:
+ if extension.name == name:
+ accept = extension.accept(offer)
+ if isinstance(accept, bool):
+ if accept:
+ accepts[extension.name] = True
+ elif accept is not None:
+ accepts[extension.name] = accept.encode("ascii")
+
+ if accepts:
+ extensions: List[bytes] = []
+ for name, params in accepts.items():
+ name_bytes = name.encode("ascii")
+ if isinstance(params, bool):
+ assert params
+ extensions.append(name_bytes)
+ else:
+ if params == b"":
+ extensions.append(b"%s" % (name_bytes))
+ else:
+ extensions.append(b"%s; %s" % (name_bytes, params))
+ return b", ".join(extensions)
+
+ return None
+
+
+def client_extensions_handshake(
+ accepted: Iterable[str], supported: Sequence[Extension]
+) -> List[Extension]:
+    # This raises RemoteProtocolError if the accepted extension is not
+    # supported.
+ extensions = []
+ for accept in accepted:
+ name = accept.split(";", 1)[0].strip()
+ for extension in supported:
+ if extension.name == name:
+ extension.finalize(accept)
+ extensions.append(extension)
+ break
+ else:
+ raise RemoteProtocolError(
+ f"unrecognized extension {name}", event_hint=RejectConnection()
+ )
+ return extensions
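+
+
+# ---------------------------------------------------------------------------
+# Illustrative sketch, not part of wsproto: a blocking server-side handshake
+# over a connected ``sock`` (any object with recv()/sendall() is assumed).
+def _example_server_handshake(sock) -> H11Handshake:
+    handshake = H11Handshake(ConnectionType.SERVER)
+    while True:
+        handshake.receive_data(sock.recv(4096))
+        for event in handshake.events():
+            if isinstance(event, Request):
+                sock.sendall(handshake.send(AcceptConnection()))
+                return handshake  # handshake.connection is now a Connection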
diff --git a/.venv/Lib/site-packages/wsproto/py.typed b/.venv/Lib/site-packages/wsproto/py.typed
new file mode 100644
index 0000000..f5642f7
--- /dev/null
+++ b/.venv/Lib/site-packages/wsproto/py.typed
@@ -0,0 +1 @@
+Marker
diff --git a/.venv/Lib/site-packages/wsproto/typing.py b/.venv/Lib/site-packages/wsproto/typing.py
new file mode 100644
index 0000000..a44b27e
--- /dev/null
+++ b/.venv/Lib/site-packages/wsproto/typing.py
@@ -0,0 +1,3 @@
+from typing import List, Tuple
+
+Headers = List[Tuple[bytes, bytes]]
diff --git a/.venv/Lib/site-packages/wsproto/utilities.py b/.venv/Lib/site-packages/wsproto/utilities.py
new file mode 100644
index 0000000..7cf53d1
--- /dev/null
+++ b/.venv/Lib/site-packages/wsproto/utilities.py
@@ -0,0 +1,88 @@
+"""
+wsproto/utilities
+~~~~~~~~~~~~~~~~~
+
+Utility functions that do not belong in a separate module.
+"""
+import base64
+import hashlib
+import os
+from typing import Dict, List, Optional, Union
+
+from h11._headers import Headers as H11Headers
+
+from .events import Event
+from .typing import Headers
+
+# RFC6455, Section 1.3 - Opening Handshake
+ACCEPT_GUID = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
+
+
+class ProtocolError(Exception):
+ pass
+
+
+class LocalProtocolError(ProtocolError):
+ """Indicates an error due to local/programming errors.
+
+ This is raised when the connection is asked to do something that
+ is either incompatible with the state or the websocket standard.
+
+ """
+
+ pass # noqa
+
+
+class RemoteProtocolError(ProtocolError):
+ """Indicates an error due to the remote's actions.
+
+ This is raised when processing the bytes from the remote if the
+ remote has sent data that is incompatible with the websocket
+ standard.
+
+ .. attribute:: event_hint
+
+ This is a suggested wsproto Event to send to the client based
+ on the error. It could be None if no hint is available.
+
+ """
+
+ def __init__(self, message: str, event_hint: Optional[Event] = None) -> None:
+ self.event_hint = event_hint
+ super().__init__(message)
+
+
+# Some convenience utilities for working with HTTP headers
+def normed_header_dict(h11_headers: Union[Headers, H11Headers]) -> Dict[bytes, bytes]:
+ # This mangles Set-Cookie headers. But it happens that we don't care about
+ # any of those, so it's OK. For every other HTTP header, if there are
+ # multiple instances then you're allowed to join them together with
+ # commas.
+ name_to_values: Dict[bytes, List[bytes]] = {}
+ for name, value in h11_headers:
+ name_to_values.setdefault(name, []).append(value)
+ name_to_normed_value = {}
+ for name, values in name_to_values.items():
+ name_to_normed_value[name] = b", ".join(values)
+ return name_to_normed_value
+
+
+# We use this for parsing the proposed protocol list, and for parsing the
+# proposed and accepted extension lists. For the proposed protocol list it's
+# fine, because the ABNF is just 1#token. But for the extension lists, it's
+# wrong, because those can contain quoted strings, which can in turn contain
+# commas. XX FIXME
+def split_comma_header(value: bytes) -> List[str]:
+ return [piece.decode("ascii").strip() for piece in value.split(b",")]
+
+
+def generate_nonce() -> bytes:
+ # os.urandom may be overkill for this use case, but I don't think this
+ # is a bottleneck, and better safe than sorry...
+ return base64.b64encode(os.urandom(16))
+
+
+def generate_accept_token(token: bytes) -> bytes:
+ accept_token = token + ACCEPT_GUID
+ accept_token = hashlib.sha1(accept_token).digest()
+ return base64.b64encode(accept_token)
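+
+
+# Illustrative sketch, not part of wsproto: the worked example from RFC 6455,
+# Section 1.3, where the sample client nonce hashes to a fixed accept token.
+def _example_accept_token() -> None:
+    token = generate_accept_token(b"dGhlIHNhbXBsZSBub25jZQ==")
+    assert token == b"s3pPLMBiTxaQ9kYGzzhZRbK+xOo="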
diff --git a/app.py b/app.py
index 1379bb9..75ce246 100644
--- a/app.py
+++ b/app.py
@@ -1,17 +1,16 @@
from flask import Flask, render_template
+from flask_socketio import SocketIO, emit
import sqlite3
-import subprocess
-
-#subprocess.Popen(["python", "scanf_face.py"])
-
+import eventlet
app = Flask(__name__)
+socketio = SocketIO(app, async_mode='eventlet')
# Fetch match log records from the database
def get_match_logs(db_name="face_database.db"):
conn = sqlite3.connect(db_name)
c = conn.cursor()
- c.execute("SELECT name, identity, image_path, match_time FROM match_logs")
+ c.execute("SELECT name, identity,image_path,match_time FROM match_logs") # 去掉了 image_path
logs = c.fetchall()
conn.close()
return logs
@@ -22,5 +21,19 @@ def index():
logs = get_match_logs()
return render_template('index.html', logs=logs)
+# Handle new WebSocket connections
+@socketio.on('connect')
+def handle_connect():
+    print('Client connected')
+    emit('update', {'logs': get_match_logs()})
+
+# Push the latest logs to all connected clients
+def send_updates():
+    while True:
+        # Poll the database for new match records
+        socketio.emit('update', {'logs': get_match_logs()})  # a server-level emit broadcasts by default
+        eventlet.sleep(5)  # send an update every 5 seconds
+
if __name__ == '__main__':
- app.run(debug=True)
+ socketio.start_background_task(send_updates)
+ socketio.run(app, debug=True)
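+
+# Note (a suggested hardening, not in the original change): with
+# async_mode='eventlet', eventlet normally needs to monkey-patch the standard
+# library *before* anything else is imported so that blocking socket and
+# time.sleep calls cooperate with its green threads. A minimal sketch of the
+# top of this file:
+#
+#     import eventlet
+#     eventlet.monkey_patch()
+#     from flask import Flask, render_template  # other imports follow the patch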
diff --git a/captured_faces/face_1.jpg b/captured_faces/face_1.jpg
index 0db9b57..1e2e725 100644
Binary files a/captured_faces/face_1.jpg and b/captured_faces/face_1.jpg differ
diff --git a/captured_faces/face_10.jpg b/captured_faces/face_10.jpg
index b6dff5f..365f27f 100644
Binary files a/captured_faces/face_10.jpg and b/captured_faces/face_10.jpg differ
diff --git a/captured_faces/face_2.jpg b/captured_faces/face_2.jpg
index e147e0a..cad0577 100644
Binary files a/captured_faces/face_2.jpg and b/captured_faces/face_2.jpg differ
diff --git a/captured_faces/face_3.jpg b/captured_faces/face_3.jpg
index b8ba8dd..6350088 100644
Binary files a/captured_faces/face_3.jpg and b/captured_faces/face_3.jpg differ
diff --git a/captured_faces/face_4.jpg b/captured_faces/face_4.jpg
index bd15c4c..a5c591b 100644
Binary files a/captured_faces/face_4.jpg and b/captured_faces/face_4.jpg differ
diff --git a/captured_faces/face_5.jpg b/captured_faces/face_5.jpg
index 42d215b..df01368 100644
Binary files a/captured_faces/face_5.jpg and b/captured_faces/face_5.jpg differ
diff --git a/captured_faces/face_6.jpg b/captured_faces/face_6.jpg
index d586a5e..0fe45cd 100644
Binary files a/captured_faces/face_6.jpg and b/captured_faces/face_6.jpg differ
diff --git a/captured_faces/face_7.jpg b/captured_faces/face_7.jpg
index ce2b7af..acf89c2 100644
Binary files a/captured_faces/face_7.jpg and b/captured_faces/face_7.jpg differ
diff --git a/captured_faces/face_8.jpg b/captured_faces/face_8.jpg
index 6163799..8dc3229 100644
Binary files a/captured_faces/face_8.jpg and b/captured_faces/face_8.jpg differ
diff --git a/captured_faces/face_9.jpg b/captured_faces/face_9.jpg
index 248b71c..3f12f4f 100644
Binary files a/captured_faces/face_9.jpg and b/captured_faces/face_9.jpg differ
diff --git a/db_image/test2.jpg b/db_image/test2.jpg
new file mode 100644
index 0000000..fd7cba8
Binary files /dev/null and b/db_image/test2.jpg differ
diff --git a/db_image/test3.jpg b/db_image/test3.jpg
new file mode 100644
index 0000000..2a13c5d
Binary files /dev/null and b/db_image/test3.jpg differ
diff --git a/face_database.db b/face_database.db
deleted file mode 100644
index 51f6057..0000000
Binary files a/face_database.db and /dev/null differ
diff --git a/requirements.txt b/requirements.txt
index 8a515c4..94ea33f 100644
Binary files a/requirements.txt and b/requirements.txt differ
diff --git a/scanf_face.py b/scanf_face.py
index 8303e95..cd6281b 100644
--- a/scanf_face.py
+++ b/scanf_face.py
@@ -14,17 +14,17 @@ max_photos = 10
save_path = "./captured_faces"
os.makedirs(save_path, exist_ok=True)
-
def create_face_database(db_name="face_database.db"):
+ """创建人脸数据库和匹配日志表"""
conn = sqlite3.connect(db_name)
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS faces
(id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
identity TEXT NOT NULL,
+ image_path TEXT NOT NULL,
encoding BLOB NOT NULL)''')
- # Create the match log table
c.execute('''CREATE TABLE IF NOT EXISTS match_logs
(id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
@@ -34,59 +34,37 @@ def create_face_database(db_name="face_database.db"):
conn.commit()
conn.close()
-
def add_face_to_database(name, identity, image_path, db_name="face_database.db"):
+ """将人脸信息添加到数据库"""
conn = sqlite3.connect(db_name)
c = conn.cursor()
- # Load the image and compute its face encoding
image = face_recognition.load_image_file(image_path)
face_encodings = face_recognition.face_encodings(image)
if face_encodings:
face_encoding = face_encodings[0]
- # Convert the encoding to a storable format
encoding_blob = np.array(face_encoding).tobytes()
- c.execute("INSERT INTO faces (name, identity, encoding) VALUES (?, ?, ?)",
- (name, identity, encoding_blob))
+ c.execute("INSERT INTO faces (name, identity, image_path, encoding) VALUES (?, ?, ?, ?)",
+ (name, identity, image_path, encoding_blob))
conn.commit()
conn.close()
-
-def log_match(name, identity, image_path, db_name="face_database.db", log_file="match_log.txt"):
- conn = sqlite3.connect(db_name)
- c = conn.cursor()
-
- # Get the current time
- match_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
- # Insert the match info into the match_logs table
- c.execute("INSERT INTO match_logs (name, identity, image_path, match_time) VALUES (?, ?, ?, ?)",
- (name, identity, image_path, match_time))
- conn.commit()
- conn.close()
-
- # Append the match info to a text file
- with open(log_file, "a") as f:
- f.write(f"Matched: {name} ({identity}) at {image_path} time: {match_time}\n")
-
-
def match_faces(captured_images, db_name="face_database.db", tolerance=0.4, log_file="match_log.txt"):
+ """比对抓拍的图片与数据库中的已知人脸"""
conn = sqlite3.connect(db_name)
c = conn.cursor()
- # Fetch all stored face encodings from the database
c.execute("SELECT name, identity, encoding FROM faces")
known_faces = c.fetchall()
for image_path in captured_images:
- # Load the image to match and compute its encoding
unknown_image = face_recognition.load_image_file(image_path)
face_encodings = face_recognition.face_encodings(unknown_image)
if len(face_encodings) == 0:
- print(f"没有人脸 {image_path}")
- continue # 如果没有检测到人脸,跳过该图片
+ print(f"没有检测到人脸:{image_path}")
+ continue
unknown_encoding = face_encodings[0]
@@ -94,20 +72,39 @@ def match_faces(captured_images, db_name="face_database.db", tolerance=0.4, log_
known_encoding = np.frombuffer(encoding_blob, dtype=np.float64)
match = face_recognition.compare_faces([known_encoding], unknown_encoding, tolerance=tolerance)
- if match[0]: # match found
- print(f"Match found: {name} ({identity}) in {image_path}")
- log_match(name, identity, image_path, db_name, log_file) # log the match and timestamp to the DB and a text file
+ if match[0]:
+ print(f"Match: {name} ({identity}) in {image_path}")
+ log_match(name, identity, image_path, db_name, log_file)
+ c.execute("UPDATE faces SET image_path = ? WHERE name = ? AND identity = ?",
+ (image_path, name, identity))
+ conn.commit()
conn.close()
- return True # return success as soon as a match is found
- print(f"No match found in {image_path}")
+ return True
+
+ print(f"No known face matched in {image_path}")
+
+ conn.close()
+ return False
+
+def log_match(name, identity, image_path, db_name, log_file):
+ """记录匹配结果"""
+ with open(log_file, 'a') as log:
+ log.write(f"{name},{identity},{image_path}\n")
+
+ conn = sqlite3.connect(db_name)
+ c = conn.cursor()
+ c.execute("INSERT INTO match_logs (name, identity, image_path, match_time) VALUES (?, ?, ?, ?)",
+ (name, identity, image_path, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
+ conn.commit()
conn.close()
- return False # return failure if no comparison matched
# Create the face database
create_face_database()
# Add known faces to the database
+add_face_to_database("小霖老师", "居民", "./db_image/test2.jpg")
add_face_to_database("屈礼", "居民", "./db_image/test.jpg")
+add_face_to_database("岳老师", "居民", "./db_image/test3.jpg")
# Main capture loop
while True:
@@ -115,10 +112,7 @@ while True:
if not ret:
break
- # Convert the frame to RGB
rgb_frame = frame[:, :, ::-1]
-
- # Detect faces
face_locations = face_recognition.face_locations(rgb_frame)
if face_locations:
@@ -137,14 +131,9 @@ while True:
for face_location in face_locations:
top, right, bottom, left = face_location
-
- # Draw a green box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
- # Extract the face region
face_image = frame[top:bottom, left:right]
-
- # Save the captured face image
image_path = os.path.join(save_path, f"face_{photo_count + 1}.jpg")
cv2.imwrite(image_path, face_image)
captured_images.append(image_path)
@@ -153,20 +142,17 @@ while True:
if photo_count >= max_photos:
break
- # Show the frame
cv2.imshow("Capturing Faces", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
- # Close the window
cv2.destroyAllWindows()
if match_faces(captured_images):
- print("至少一张匹配")
+ print("匹配成功")
else:
print("没有匹配")
- # 等待60秒后继续循环检测
print("等待30秒后继续...")
time.sleep(30)
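
Two details of the matching code above are worth making concrete. First,
face_recognition.compare_faces is a Euclidean-distance test, so tolerance=0.4
means "distance of at most 0.4", stricter than the library's 0.6 default.
Second, the encoding BLOB round-trips losslessly because the 128-dimensional
encoding is float64 on both ends. A small sketch of both points, using only
numpy (the distance test restates the library's documented behaviour rather
than calling it):

    import numpy as np

    def is_match(known: np.ndarray, unknown: np.ndarray,
                 tolerance: float = 0.4) -> bool:
        # compare_faces boils down to: Euclidean distance <= tolerance
        return bool(np.linalg.norm(known - unknown) <= tolerance)

    # The tobytes()/frombuffer() round-trip used for the encoding BLOB:
    enc = np.random.rand(128)  # stands in for a real face encoding
    blob = enc.tobytes()
    assert np.array_equal(np.frombuffer(blob, dtype=np.float64), enc)
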
diff --git a/templates/index.html b/templates/index.html
index b785b75..18233b7 100644
--- a/templates/index.html
+++ b/templates/index.html
@@ -1,60 +1,77 @@
-    <title>匹配记录</title>
+    <title>Face Match Logs</title>