From 3ad2d1ecc1704aeef27214151111918e055ca13c Mon Sep 17 00:00:00 2001
From: Bret McGuire
Date: Tue, 14 Sep 2021 23:43:29 -0500
Subject: [PATCH 001/551] Fixes to the Travis build. (#1111)

These fixes were originally implemented by user tbbharaj in
https://github.com/datastax/python-driver/pull/1108. Extracting them into
their own PR since 1108 is still being worked on, and I'd very much like
to benefit from this work across _all_ PRs against python-driver.

Major thanks to tbbharaj for the original work here.
---
 .travis.yml | 1 +
 tox.ini     | 1 +
 2 files changed, 2 insertions(+)

diff --git a/.travis.yml b/.travis.yml
index b485e21227..7e1e374822 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -24,6 +24,7 @@ addons:
     - libev-dev

 install:
+  - pip install --upgrade setuptools
   - pip install tox-travis
   - if [[ $TRAVIS_PYTHON_VERSION != pypy3.5 ]]; then pip install lz4; fi

diff --git a/tox.ini b/tox.ini
index d883a1f973..6d94e11247 100644
--- a/tox.ini
+++ b/tox.ini
@@ -12,6 +12,7 @@ deps = nose
        pure-sasl
        kerberos
        futurist
+       greenlet>=0.4.14,<0.4.17
 lz4_dependency = py27,py35,py36,py37,py38: lz4

 [testenv]

From a51ed116471a63a65c63db6356a3ade9efdd1b85 Mon Sep 17 00:00:00 2001
From: Piotr Sarna
Date: Wed, 15 Sep 2021 07:06:54 +0200
Subject: [PATCH 002/551] Merge pull request #1103 from psarna/fix_deprecation_in_tracing

Tracing code uses a deprecated mechanism for fetching the first row when
populating traces. The behavior is now fixed.
---
 cassandra/query.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cassandra/query.py b/cassandra/query.py
index 0e7a41dc2d..f7a5b8fdf5 100644
--- a/cassandra/query.py
+++ b/cassandra/query.py
@@ -996,7 +996,8 @@ def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None):
                 SimpleStatement(self._SELECT_SESSIONS_FORMAT, consistency_level=query_cl), (self.trace_id,), time_spent, max_wait)

             # PYTHON-730: There is a race condition where the duration mutation is written before started_at for fast queries
-            is_complete = session_results and session_results[0].duration is not None and session_results[0].started_at is not None
+            session_row = session_results.one() if session_results else None
+            is_complete = session_row is not None and session_row.duration is not None and session_row.started_at is not None
             if not session_results or (wait_for_complete and not is_complete):
                 time.sleep(self._BASE_RETRY_SLEEP * (2 ** attempt))
                 attempt += 1
@@ -1006,7 +1007,6 @@ def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None):
             else:
                 log.debug("Fetching partial trace info for trace ID: %s", self.trace_id)

-            session_row = session_results[0]
             self.request_type = session_row.request
             self.duration = timedelta(microseconds=session_row.duration) if is_complete else None
             self.started_at = session_row.started_at

From 1d9077d3f4c937929acc14f45c7693e76dde39a9 Mon Sep 17 00:00:00 2001
From: Ultrabug
Date: Fri, 17 Sep 2021 19:40:42 +0200
Subject: [PATCH 003/551] Merge pull request #1103 from numberly/fix_empty_paging

This commit fixes a problem where, when iterating over a ResultSet, the
driver would abort the iteration if the server returned an empty page,
even when further pages were still available.
The Python driver is affected by the same problem as JAVA-2934.
This fix is similar to https://github.com/datastax/java-driver/pull/1544
---
 cassandra/cluster.py         |  1 +
 tests/unit/test_resultset.py | 13 +++++++++++++
 2 files changed, 14 insertions(+)

diff --git a/cassandra/cluster.py b/cassandra/cluster.py
index 7e101afba8..c2d2e719ac 100644
--- a/cassandra/cluster.py
+++ b/cassandra/cluster.py
@@ -5141,6 +5141,7 @@ def next(self):
         if not self.response_future._continuous_paging_session:
             self.fetch_next_page()
             self._page_iter = iter(self._current_rows)
+            return self.next()

         return next(self._page_iter)

diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py
index 1af3e849b6..b37c3a2594 100644
--- a/tests/unit/test_resultset.py
+++ b/tests/unit/test_resultset.py
@@ -41,6 +41,19 @@ def test_iter_paged(self):
         type(response_future).has_more_pages = PropertyMock(side_effect=(True, True, False))  # after init to avoid side effects being consumed by init
         self.assertListEqual(list(itr), expected)

+    def test_iter_paged_with_empty_pages(self):
+        expected = list(range(10))
+        response_future = Mock(has_more_pages=True, _continuous_paging_session=None)
+        response_future.result.side_effect = [
+            ResultSet(Mock(), []),
+            ResultSet(Mock(), [0, 1, 2, 3, 4]),
+            ResultSet(Mock(), []),
+            ResultSet(Mock(), [5, 6, 7, 8, 9]),
+        ]
+        rs = ResultSet(response_future, [])
+        itr = iter(rs)
+        self.assertListEqual(list(itr), expected)
+
     def test_list_non_paged(self):
         # list access on RS for backwards-compatibility
         expected = list(range(10))

From 12a8adce943fe37a05ad6580e8bd302b65c2d93a Mon Sep 17 00:00:00 2001
From: Bret McGuire
Date: Fri, 17 Sep 2021 12:47:48 -0500
Subject: [PATCH 004/551] Comment update following up on
 https://github.com/datastax/python-driver/pull/1110

---
 cassandra/cluster.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/cassandra/cluster.py b/cassandra/cluster.py
index c2d2e719ac..dc850ae809 100644
--- a/cassandra/cluster.py
+++ b/cassandra/cluster.py
@@ -5141,6 +5141,11 @@ def next(self):
         if not self.response_future._continuous_paging_session:
             self.fetch_next_page()
             self._page_iter = iter(self._current_rows)
+
+            # Some servers can return empty pages in this case; Scylla is known to do
+            # so in some circumstances. Guard against this by recursing to handle
+            # the next(iter) call. If we have an empty page in that case it will
+            # get handled by the StopIteration handler when we recurse.
             return self.next()

         return next(self._page_iter)
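As an illustration of the paging fix above, the behavior can be modeled with
a toy pager. This sketch is not the driver's actual ResultSet (the structure
is simplified, and it loops where the driver recurses), but it shows why an
empty page must not terminate iteration while more pages remain:

    class ToyPagedResult:
        """Iterates rows across pages, skipping empty pages (illustrative only)."""

        def __init__(self, pages):
            self._pages = list(pages)      # e.g. [[], [0, 1], [], [2, 3]]
            self._page_iter = iter([])

        def __iter__(self):
            return self

        def __next__(self):
            while True:
                try:
                    return next(self._page_iter)
                except StopIteration:
                    if not self._pages:    # no more pages: end of iteration
                        raise
                    # An empty page just loops around and fetches the next one,
                    # which is what the driver's recursive self.next() achieves.
                    self._page_iter = iter(self._pages.pop(0))

    assert list(ToyPagedResult([[], [0, 1, 2], [], [3, 4]])) == [0, 1, 2, 3, 4]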
From 175942852bcfc97bac823834a0b170b0faa4adb0 Mon Sep 17 00:00:00 2001
From: Oren Efraimov
Date: Tue, 23 Nov 2021 19:10:50 +0200
Subject: [PATCH 005/551] Merge pull request #1116 from Orenef11/fix_default_argument_value

Removing Python mutable defaults from methods in tests/integration/__init__.py

Co-authored-by: Efraimov Oren
---
 tests/integration/__init__.py | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
index 9d350af707..70ec11c213 100644
--- a/tests/integration/__init__.py
+++ b/tests/integration/__init__.py
@@ -383,15 +383,15 @@ def get_node(node_id):
     return CCM_CLUSTER.nodes['node%s' % node_id]


-def use_multidc(dc_list, workloads=[]):
+def use_multidc(dc_list, workloads=None):
     use_cluster(MULTIDC_CLUSTER_NAME, dc_list, start=True, workloads=workloads)


-def use_singledc(start=True, workloads=[], use_single_interface=USE_SINGLE_INTERFACE):
+def use_singledc(start=True, workloads=None, use_single_interface=USE_SINGLE_INTERFACE):
     use_cluster(CLUSTER_NAME, [3], start=start, workloads=workloads, use_single_interface=use_single_interface)


-def use_single_node(start=True, workloads=[], configuration_options={}, dse_options={}):
+def use_single_node(start=True, workloads=None, configuration_options=None, dse_options=None):
     use_cluster(SINGLE_NODE_CLUSTER_NAME, [1], start=start, workloads=workloads,
                 configuration_options=configuration_options, dse_options=dse_options)

@@ -453,10 +453,11 @@ def start_cluster_wait_for_up(cluster):

 def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, set_keyspace=True, ccm_options=None,
-                configuration_options={}, dse_options={}, use_single_interface=USE_SINGLE_INTERFACE):
+                configuration_options=None, dse_options=None, use_single_interface=USE_SINGLE_INTERFACE):
+    configuration_options = configuration_options or {}
+    dse_options = dse_options or {}
+    workloads = workloads or []
     dse_cluster = True if DSE_VERSION else False
-    if not workloads:
-        workloads = []

     if ccm_options is None and DSE_VERSION:
         ccm_options = {"version": CCM_VERSION}
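The pitfall fixed above deserves a standalone illustration: a mutable default
argument is evaluated once, when the function is defined, so every call shares
the same object. A minimal demonstration (hypothetical function names; the
fixed variant mirrors the "workloads = workloads or []" idiom adopted in the
patch):

    def add_workload_buggy(workload, workloads=[]):    # one list shared by all calls
        workloads.append(workload)
        return workloads

    def add_workload_fixed(workload, workloads=None):  # fresh list on each call
        workloads = workloads or []
        workloads.append(workload)
        return workloads

    assert add_workload_buggy("graph") == ["graph"]
    assert add_workload_buggy("spark") == ["graph", "spark"]  # state leaked between calls
    assert add_workload_fixed("graph") == ["graph"]
    assert add_workload_fixed("spark") == ["spark"]           # calls stay independent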
From 387150acc365b6cf1daaee58c62db13e4929099a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Piotr=20Jastrz=C4=99bski?=
Date: Tue, 23 Nov 2021 18:18:33 +0100
Subject: [PATCH 006/551] Merge pull request #1114 from haaawk/stream_ids_fix

Stop reusing stream ids of requests that have timed out due to a client-side
timeout (#1114)

* ResponseFuture: do not return the stream ID on client timeout

When a timeout occurs, the ResponseFuture associated with the query returns
its stream ID to the associated connection's free stream ID pool - so that
the stream ID can be immediately reused by another query. However, that is
incorrect and dangerous.

If query A times out before it receives a response from the cluster, a
different query B might be issued on the same connection and stream. If the
response for query A arrives earlier than the response for query B, the
first one might be misinterpreted as the response for query B.

This commit changes the logic so that stream IDs are not returned on
timeout - now, they are only returned after receiving a response.

* Connection: fix tracking of in_flight requests

This commit fixes tracking of in_flight requests. Before it, in case of a
client-side timeout, the stream ID was not returned to the pool, but the
in_flight counter was decremented anyway. This counter is used to determine
whether there is a need to wait for stream IDs to be freed - without this
patch, the driver could decide that it was safe to initiate another request
because the in_flight counter was low, even though there were no free
stream IDs left to allocate; an assertion was then triggered and the
connection was defuncted and opened again. Now, requests timed out on the
client side are tracked in the orphaned_request_ids field, and the
in_flight counter is decremented only after the response is received.

* Connection: notify owning pool about released orphaned streams

Before this patch, the following situation could occur:

1. On a single connection, multiple requests are spawned up to the maximum
   concurrency,
2. We want to issue more requests, but we need to wait on a condition
   variable because the requests spawned in 1. took all stream IDs and we
   need to wait until some of them are freed,
3. All requests from point 1. time out on the client side - we cannot free
   their stream IDs until the database node responds,
4. Responses for the requests issued in point 1. arrive, but the Connection
   class has no access to the condition variable mentioned in point 2., so
   no requests from point 2. are admitted,
5. Requests from point 2. waiting on the condition variable time out even
   though stream IDs are available.

This commit adds an _on_orphaned_stream_released field to the Connection
class, and the connection now notifies the owning pool whenever a timed out
request receives a late response and a stream ID is freed, by calling the
_on_orphaned_stream_released callback.

* HostConnection: implement replacing overloaded connections

In a situation of very high overload or poor networking conditions, it
might happen that there is a large number of outstanding requests on a
single connection. Each request reserves a stream ID, which cannot be
reused until a response for it arrives, even if the request already timed
out on the client side. Because the pool of available stream IDs for a
single connection is limited, such a situation might cause the set of free
stream IDs to shrink to a very small size (including zero), which will
drastically reduce the available concurrency on the connection, or even
render it unusable for some time.

In order to prevent this, the following strategy is adopted: when the
number of orphaned stream IDs reaches a certain threshold (e.g. 75% of all
available stream IDs), the connection is marked as overloaded. Meanwhile, a
new connection is opened - when it becomes available, it replaces the old
one, and the old connection is moved to "trash", where it waits until all
its outstanding requests either respond or time out.

This feature is implemented for HostConnection but not for
HostConnectionPool, which means that it will only work for clusters which
use protocol v3 or newer.

This fix is heavily inspired by the fix for JAVA-1519.
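For illustration, the bookkeeping described above can be condensed into a
small self-contained sketch. The class and names below are simplified
stand-ins, not the driver's actual Connection/HostConnection code: a stream
ID that times out on the client side is merely marked orphaned, and only the
arrival of the late response recycles it and decrements in_flight.

    import threading

    class SketchConnection:
        """Toy model of per-connection stream ID accounting (illustrative only)."""
        max_streams = 8

        def __init__(self):
            self.lock = threading.Lock()
            self.free_ids = list(range(self.max_streams))
            self.in_flight = 0           # counts orphaned requests too
            self.orphaned = set()        # timed out, still awaiting a late response
            self.orphaned_threshold = 3 * self.max_streams // 4
            self.overloaded = False      # signals the pool to replace this connection

        def start_request(self):
            with self.lock:
                if not self.free_ids:
                    raise RuntimeError("no free stream IDs")
                self.in_flight += 1
                return self.free_ids.pop()

        def on_client_timeout(self, stream_id):
            # The node may still answer on this stream, so the ID must not be
            # reused yet; only mark it orphaned and check the 75% threshold.
            with self.lock:
                self.orphaned.add(stream_id)
                if len(self.orphaned) >= self.orphaned_threshold:
                    self.overloaded = True

        def on_response(self, stream_id):
            # Only now is it safe to recycle the stream ID and drop in_flight.
            with self.lock:
                self.orphaned.discard(stream_id)
                self.in_flight -= 1
                self.free_ids.append(stream_id)

The invariant mirrors the commit: in_flight only drops when a stream ID
actually becomes reusable again, so the counter can no longer promise
concurrency that the free ID pool cannot deliver.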
Co-authored-by: Piotr Dulikowski
---
 cassandra/cluster.py                    | 11 ++-
 cassandra/connection.py                 | 32 +++++++-
 cassandra/pool.py                       | 97 +++++++++++++++++-----
 tests/unit/.noseids                     | Bin 0 -> 30098 bytes
 tests/unit/test_host_connection_pool.py | 20 ++--
 tests/unit/test_response_future.py      | 28 +++++++
 6 files changed, 158 insertions(+), 30 deletions(-)
 create mode 100644 tests/unit/.noseids

diff --git a/cassandra/cluster.py b/cassandra/cluster.py
index dc850ae809..cf78725f17 100644
--- a/cassandra/cluster.py
+++ b/cassandra/cluster.py
@@ -4361,10 +4361,17 @@ def _on_timeout(self, _attempts=0):
         pool = self.session._pools.get(self._current_host)
         if pool and not pool.is_shutdown:
+            # Do not return the stream ID to the pool yet. We cannot reuse it
+            # because the node might still be processing the query and will
+            # return a late response to that query - if we used such a stream
+            # before the response to the previous query has arrived, the new
+            # query could get a response from the old query
             with self._connection.lock:
-                self._connection.request_ids.append(self._req_id)
+                self._connection.orphaned_request_ids.add(self._req_id)
+                if len(self._connection.orphaned_request_ids) >= self._connection.orphaned_threshold:
+                    self._connection.orphaned_threshold_reached = True

-            pool.return_connection(self._connection)
+            pool.return_connection(self._connection, stream_was_orphaned=True)

         errors = self._errors
         if not errors:

diff --git a/cassandra/connection.py b/cassandra/connection.py
index 0d8a50e76f..0869584663 100644
--- a/cassandra/connection.py
+++ b/cassandra/connection.py
@@ -690,6 +690,7 @@ class Connection(object):
     # The current number of operations that are in flight. More precisely,
     # the number of request IDs that are currently in use.
+    # This includes orphaned requests.
     in_flight = 0

     # Max concurrent requests allowed per connection. This is set optimistically high, allowing
     # request_ids set
     highest_request_id = 0

+    # Tracks the request IDs which are no longer waited on (timed out), but
+    # cannot be reused yet because the node might still send a response
+    # on this stream
+    orphaned_request_ids = None
+
+    # Set to true if the orphaned stream ID count crosses the configured threshold
+    # and the connection will be replaced
+    orphaned_threshold_reached = False
+
+    # If the number of orphaned streams reaches this threshold, this connection
+    # will become marked and will be replaced with a new connection by the
+    # owning pool (currently, only HostConnection supports this)
+    orphaned_threshold = 3 * max_in_flight // 4
+
     is_defunct = False
     is_closed = False
     lock = None

@@ -733,6 +748,8 @@ class Connection(object):
     _is_checksumming_enabled = False

+    _on_orphaned_stream_released = None
+
     @property
     def _iobuf(self):
         # backward compatibility, to avoid any change in the reactors

     def __init__(self, host='127.0.0.1', port=9042, authenticator=None,
                  ssl_options=None, sockopts=None, compression=True,
                  cql_version=None, protocol_version=ProtocolVersion.MAX_SUPPORTED,
                  is_control_connection=False, user_type_map=None, connect_timeout=None,
                  allow_beta_protocol_version=False, no_compact=False,
-                 ssl_context=None):
+                 ssl_context=None, on_orphaned_stream_released=None):
         # TODO next major rename host to endpoint and remove port kwarg.
self.endpoint = host if isinstance(host, EndPoint) else DefaultEndPoint(host, port) @@ -764,6 +781,8 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self._io_buffer = _ConnectionIOBuffer(self) self._continuous_paging_sessions = {} self._socket_writable = True + self.orphaned_request_ids = set() + self._on_orphaned_stream_released = on_orphaned_stream_released if ssl_options: self._check_hostname = bool(self.ssl_options.pop('check_hostname', False)) @@ -1188,11 +1207,22 @@ def process_msg(self, header, body): decoder = paging_session.decoder result_metadata = None else: + need_notify_of_release = False + with self.lock: + if stream_id in self.orphaned_request_ids: + self.in_flight -= 1 + self.orphaned_request_ids.remove(stream_id) + need_notify_of_release = True + if need_notify_of_release and self._on_orphaned_stream_released: + self._on_orphaned_stream_released() + try: callback, decoder, result_metadata = self._requests.pop(stream_id) # This can only happen if the stream_id was # removed due to an OperationTimedOut except KeyError: + with self.lock: + self.request_ids.append(stream_id) return try: diff --git a/cassandra/pool.py b/cassandra/pool.py index cd27656046..c82dfe9a6b 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -390,6 +390,10 @@ def __init__(self, host, host_distance, session): # this is used in conjunction with the connection streams. Not using the connection lock because the connection can be replaced in the lifetime of the pool. self._stream_available_condition = Condition(self._lock) self._is_replacing = False + # Contains connections which shouldn't be used anymore + # and are waiting until all requests time out or complete + # so that we can dispose of them. + self._trash = set() if host_distance == HostDistance.IGNORED: log.debug("Not opening connection to ignored host %s", self.host) @@ -399,13 +403,13 @@ def __init__(self, host, host_distance, session): return log.debug("Initializing connection for host %s", self.host) - self._connection = session.cluster.connection_factory(host.endpoint) + self._connection = session.cluster.connection_factory(host.endpoint, on_orphaned_stream_released=self.on_orphaned_stream_released) self._keyspace = session.keyspace if self._keyspace: self._connection.set_keyspace_blocking(self._keyspace) log.debug("Finished initializing connection for host %s", self.host) - def borrow_connection(self, timeout): + def _get_connection(self): if self.is_shutdown: raise ConnectionException( "Pool for %s is shutdown" % (self.host,), self.host) @@ -413,12 +417,25 @@ def borrow_connection(self, timeout): conn = self._connection if not conn: raise NoConnectionsAvailable() + return conn + + def borrow_connection(self, timeout): + conn = self._get_connection() + if conn.orphaned_threshold_reached: + with self._lock: + if not self._is_replacing: + self._is_replacing = True + self._session.submit(self._replace, conn) + log.debug( + "Connection to host %s reached orphaned stream limit, replacing...", + self.host + ) start = time.time() remaining = timeout while True: with conn.lock: - if conn.in_flight < conn.max_request_id: + if not (conn.orphaned_threshold_reached and conn.is_closed) and conn.in_flight < conn.max_request_id: conn.in_flight += 1 return conn, conn.get_request_id() if timeout is not None: @@ -426,15 +443,19 @@ def borrow_connection(self, timeout): if remaining < 0: break with self._stream_available_condition: - self._stream_available_condition.wait(remaining) + if conn.orphaned_threshold_reached and 
conn.is_closed: + conn = self._get_connection() + else: + self._stream_available_condition.wait(remaining) raise NoConnectionsAvailable("All request IDs are currently in use") - def return_connection(self, connection): - with connection.lock: - connection.in_flight -= 1 - with self._stream_available_condition: - self._stream_available_condition.notify() + def return_connection(self, connection, stream_was_orphaned=False): + if not stream_was_orphaned: + with connection.lock: + connection.in_flight -= 1 + with self._stream_available_condition: + self._stream_available_condition.notify() if connection.is_defunct or connection.is_closed: if connection.signaled_error and not self.shutdown_on_error: @@ -461,6 +482,24 @@ def return_connection(self, connection): return self._is_replacing = True self._session.submit(self._replace, connection) + else: + if connection in self._trash: + with connection.lock: + if connection.in_flight == len(connection.orphaned_request_ids): + with self._lock: + if connection in self._trash: + self._trash.remove(connection) + log.debug("Closing trashed connection (%s) to %s", id(connection), self.host) + connection.close() + return + + def on_orphaned_stream_released(self): + """ + Called when a response for an orphaned stream (timed out on the client + side) was received. + """ + with self._stream_available_condition: + self._stream_available_condition.notify() def _replace(self, connection): with self._lock: @@ -469,7 +508,7 @@ def _replace(self, connection): log.debug("Replacing connection (%s) to %s", id(connection), self.host) try: - conn = self._session.cluster.connection_factory(self.host.endpoint) + conn = self._session.cluster.connection_factory(self.host.endpoint, on_orphaned_stream_released=self.on_orphaned_stream_released) if self._keyspace: conn.set_keyspace_blocking(self._keyspace) self._connection = conn @@ -477,9 +516,15 @@ def _replace(self, connection): log.warning("Failed reconnecting %s. Retrying." 
                        % (self.host.endpoint,))
            self._session.submit(self._replace, connection)
        else:
-            with self._lock:
-                self._is_replacing = False
-                self._stream_available_condition.notify()
+            with connection.lock:
+                with self._lock:
+                    if connection.orphaned_threshold_reached:
+                        if connection.in_flight == len(connection.orphaned_request_ids):
+                            connection.close()
+                        else:
+                            self._trash.add(connection)
+                    self._is_replacing = False
+                    self._stream_available_condition.notify()

     def shutdown(self):
         with self._lock:
@@ -493,6 +538,16 @@ def shutdown(self):
         self._connection.close()
         self._connection = None

+        trash_conns = None
+        with self._lock:
+            if self._trash:
+                trash_conns = self._trash
+                self._trash = set()
+
+        if trash_conns is not None:
+            for conn in trash_conns:
+                conn.close()
+
     def _set_keyspace_for_all_conns(self, keyspace, callback):
         if self.is_shutdown or not self._connection:
             return

@@ -548,7 +603,7 @@ def __init__(self, host, host_distance, session):
         log.debug("Initializing new connection pool for host %s", self.host)

         core_conns = session.cluster.get_core_connections_per_host(host_distance)
-        self._connections = [session.cluster.connection_factory(host.endpoint)
+        self._connections = [session.cluster.connection_factory(host.endpoint, on_orphaned_stream_released=self.on_orphaned_stream_released)
                              for i in range(core_conns)]

         self._keyspace = session.keyspace

@@ -652,7 +707,7 @@ def _add_conn_if_under_max(self):
         log.debug("Going to open new connection to host %s", self.host)
         try:
-            conn = self._session.cluster.connection_factory(self.host.endpoint)
+            conn = self._session.cluster.connection_factory(self.host.endpoint, on_orphaned_stream_released=self.on_orphaned_stream_released)
             if self._keyspace:
                 conn.set_keyspace_blocking(self._session.keyspace)
             self._next_trash_allowed_at = time.time() + _MIN_TRASH_INTERVAL

@@ -712,9 +767,10 @@ def _wait_for_conn(self, timeout):
         raise NoConnectionsAvailable()

-    def return_connection(self, connection):
+    def return_connection(self, connection, stream_was_orphaned=False):
         with connection.lock:
-            connection.in_flight -= 1
+            if not stream_was_orphaned:
+                connection.in_flight -= 1
             in_flight = connection.in_flight

         if connection.is_defunct or connection.is_closed:

@@ -750,6 +806,13 @@ def return_connection(self, connection):
         else:
             self._signal_available_conn()

+    def on_orphaned_stream_released(self):
+        """
+        Called when a response for an orphaned stream (timed out on the client
+        side) was received.
+ """ + self._signal_available_conn() + def _maybe_trash_connection(self, connection): core_conns = self._session.cluster.get_core_connections_per_host(self.host_distance) did_trash = False diff --git a/tests/unit/.noseids b/tests/unit/.noseids new file mode 100644 index 0000000000000000000000000000000000000000..1c956146fc04cc0b92d287530f5167fb38737082 GIT binary patch literal 30098 zcmcg#cX%At5r;rX140PI*noSJkR=PtHrO;7Ng z{r3iE(SLU~wua}JMXv#09638YJXUrpVyvv|dTlZKU$yR+9eZ2RHJ5}t<_pjF#sb^) z$MoWoZX2RFW>`*8q)#;ZaQ>r}MUW)VaP-Wn77W_1cut9MTBa%)#deXJ_}#9F|%rJD?(0 zc#akLrehN@kLC5YE>Ca;e1jd1p$t5?g@(GQxDKL?ph&L#X< z^PIpngl6crRuGyee642sWletA`BPzH=kZUhd~tY3KWt48#3DT5E{P;v-PWzTZyKJK zO8jx*fn{ijWFutvD@pm65uRiPGY!2z)fyJ+fU9*<`uQ(hUaXoZ6H z;McX&37kL|wE>+(geP($T-=IK*1a-fJc$!ybt^^%TxeOhw5p~1^Nw3Vyz@En z+UDenDmkkQI5945os;LgU;sW&G`;)X|xL=*poTIRCC84jE)FM`!tzk5Pvv1%-W#q?hb;(aqWu*W$xahlwvYQh+Iu8>?31^@~ z991mQ9IrS7SXJGpt^P_jtTmbQf}@!)PN5W6v!Tgl*CEAdw_n2sB>h?n5bCeF0@j`( zJQI#zD9CkecTH-jh1G?cmZauNqO0-F+ zDY{Ngo0mh7z(zI#BiRI;!h$g14BHC5i49u%m{}j3W<}UZHxunko7u4B0>QIaLc-|E zGz^QQ1-a$X#Y1=2g140oZv&~ZZGO_FwIB{HD{?g<6fBh?GQ{*$_JKDV)(FQ@+LM$F z+UHG~hRQQcO=>y>0Rn6*u#50#!Gws{u}q_m);-KdvV3=VbvgwMYvG`RDH{0F21AFi zH^Jd5P7Y&TICxcIP=X>sV1$#vmUIHCIgxFgz&o>mtBwt8-h?xDh5|-8Y3xp?Azf^j z9MKpjk!^WIQq{4Y6MjP;eBuN&&I!6T9h4HJkjM0en=t#UIg!_=BfFTiOJMHc#N3>Y znYP3yIB_$i>^qCXR^B%Dusb=CGdygq3~`_Zse=qJzl)Q@#&oIIAcm^8*w=1O)OCn@ zP^cYF=Fm+W!cnuR$><5UvuijZX`*eAUSJr)^X3C|rhed3Hu>6Rp+?oCE<1fRtP$>^ z0OXh|I?XwgCyt_UR1%3ItqR2IX4Qhqf)zkQ$u;egwkYb93E0ENw)}8-s1GiXru0qP z`*ny!Qkd=KWG3@Ly-6u%776J(PAU`Kg=9Nv0=b@(#o_KOJg^fmnig?royx+Mb^|B1 zow?LdCsiF%b|9n@oz#R~f^;;}xRH~_?%rr5GD0ztn>dN&8;q|PKuI7Bb?9*1W~OK# zCkuIA&00Z{1<4}HQ|;$OAI()p0zJ+qO`bV?C3>2^r56O{1gDmTPa%T|1$OCWQ7H-h!y(z94Q^|v+=2^ zHmTsC=#yrXjZe)XQnN6m5$?WaDpCH&%q=8!%+or>)Lkgf^qK!OsF6vnEU<8sX+!aAfO*eX}BD2O@q2n{S2s5O3+zyPA%)H{Gj4JmY}fV4zPaLt8$5?a(V`6w=2iV_+p z31v_adP#@s1P~|$^YfrB&pb^up2baLB9{g_5NPW4JROQwzy=K3^#d~LXLB>zmCK~& z1XhtWlOmPpaC6y{$E9MP62+!eFe#8w5(QJw<>oV%$A^*>P}3;G?gFNQn+#g)?c5}` z_d+5vmWah2+$^^C!oq_FSCoOqjXs=WF(VGMreAH6@FV+`1YXGWyRmr6+a3`JyzT;*|;5nsLzU=E%El?55q4E z!~<6RY%kP~-wZyNSP5i%fc&tt#>!$M(*U{zoTpf6$c`K$Wys<{CjojZO@o#uOYIoQ zrd zV!!*0VM8ylFqbgli%%CI_C3tlBMH-^(@eKh-iw(DP@kp}e1bswS>`3-wSApseDEq- z^UZ=w@UJTW-u$EyPN4{9EXZoliBtYQc;Ns$u?-!|8Z=Dcg&Hgw8E^+0OWo>9J-kF= zK-Wf7H!lsZ>z|#f^ilz3MLR+DVIv%wls$q-Rg#MOD#1DXP=bN>UA&Sj3tnI+FIy&; zg@@2BZ|i_{#mNAr2+wF}^A_HI(EKu7$|*Y*?Oyk?;0|@*LTVqpAlyC2-6)_`k z*RgOr(i1l*iL??>Dgv&`Tq>~Bpu}F!!s^&^SvA!e@p=Obubc%N*)Ez+e8siF0(*V7 zIN%8#yp;voX?^ygtxMs479M+YctrO?<$mC44z#zr*uxU9<2SO9*gFUbr5&`;2Uu9- ztm(D|jV`sS(s!dSex>uxLrYg@xAmj5f{FlLN^(TLv4g1u=LaxBHb64YuGMe~@`fcI4NI`$jE zmVkI~W5Ju4l^v3Cm>xxPnQlZ-%gWnX;IHW)K6V}thBppydU0$ca9xln9bi_W`jRIs zuzU46G^I^Oe=+D@zf=btaRagoXoIgnM-!bQ-m!cx3;&e|m~>2hCjnfj3tcG~6TrCw zmIy(=lM8xx#&px6wUV%fi)T)>#&>a%xM@HVnrAt+s44#4Tx@pr$0o_)JzN~N^~S-4 z`Yt>hiOP5{7j%ynp`cqW;vp^)Jz7Kui581^9~YY*Eh1u*w21c)$U*hnXZkDkIn?;U zQq*4#31}`X2%R3~kbVn}j2`sbfqxHdgSM;>ET7QA1&_f>b}q1dy3lBHqpZLOxv+fFpc`{RF$D?t32>Sp;X>S=ix}AA6g+(a zn;=ZrgC`-R1NtZz`qmgdj!+6vSZD@f==4K-ta3pMSL>QF3Cr{KFNi;Ax*2c<7+0CXz(c^8B{&Y z1(@&=u|h$h*bQ!-o`AD*i+Df9g^T-V$89NHAiBuiq zHKc?^8QLI$5w&^mI=UKwll?3gY+?(HH67SqP^~)fJRHdUq=h$}qm}eI zE?|t(I#i7>TBoWT=lppt=!q%$X!f*SoWYt(Jn`r(7db3+oA8>35BGtQ!56p~3}-TE zB107hF3I@xJk%GtaEH3%g4cAcC0bEm;(}dGWxrU=s`Ter@K}o4i@w}vYpD;%P+X9{ z>rm=WnG(f($?n&TagA^c><_;=%?wo~+=J9YMaP;T6a-QKU*RLUa17(XUP#D0)G>*# za+4VEE(za(`|eJuo{+`YxM`p#%2EjIJXnF$)b>ut_^)#l*wF(4y2DRzanLhEA~vhY_5hd!kq<|)3>;39%i8#(fc+xz5RpGgM$$@ zvf_0`L8+tfaPzux5MHjRKz*WVa^K}9g_ps*xr6rANViMXf$n?Ugk}dJ1OW~Gng$CV 
z2$5$#N=usaS_uve^dQCexrrVagecW8nhR7wz=tdJ2i)B9_L|7at8F)NIR7Cxjfor) z)pXORHWbA}FS&xwSSXMmaWmO72ow1{1M>MXHy?LEJ{CM;QP7P=thz)0kXy&3JD9l? zq6hUKD9Kc?BNl&aNy~D;dHjTX9(g`3Q^T5DH8jg{7UeDDpK_DBejwp`i)OW|idaA6 zrZhJYr5@bc&$*dyp}S&mk^oV}M+v&5)luvFMR-I1m9FUMCEs6Ii6@6-p{w~Z)VXGD zbGqa37!A;pN4H-to5{j`cv9Gu51BZ%{fY^3Bo)!K&1O2U?R5OeK)_M&9i7}dg&utj zZR}vFQG-U@$OlT2*?P5I;F&NQg^uOd%OR1cCpN>TmN9D6+7sE|@D}@5WKl1a~nae}k1h#KtEv{89f8r;zs~0kmCB%wg8=F7#vzZ)>4c_v{ z%6{{3_7{F?qq)@Jp-_OYL7P4NDh>$y=(mjgm7m4ld=~Kyu#AQMH-1WO3p?R;Q$5|^ z`5EkJ6-t6ZzL)z4Kb6gyRLboh?w|bVBl+l^-tAxf9L6&_ Date: Tue, 23 Nov 2021 11:12:58 -0600 Subject: [PATCH 007/551] Removing file unexpectedly included in previous PR --- tests/unit/.noseids | Bin 30098 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 tests/unit/.noseids diff --git a/tests/unit/.noseids b/tests/unit/.noseids deleted file mode 100644 index 1c956146fc04cc0b92d287530f5167fb38737082..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 30098 zcmcg#cX%At5r;rX140PI*noSJkR=PtHrO;7Ng z{r3iE(SLU~wua}JMXv#09638YJXUrpVyvv|dTlZKU$yR+9eZ2RHJ5}t<_pjF#sb^) z$MoWoZX2RFW>`*8q)#;ZaQ>r}MUW)VaP-Wn77W_1cut9MTBa%)#deXJ_}#9F|%rJD?(0 zc#akLrehN@kLC5YE>Ca;e1jd1p$t5?g@(GQxDKL?ph&L#X< z^PIpngl6crRuGyee642sWletA`BPzH=kZUhd~tY3KWt48#3DT5E{P;v-PWzTZyKJK zO8jx*fn{ijWFutvD@pm65uRiPGY!2z)fyJ+fU9*<`uQ(hUaXoZ6H z;McX&37kL|wE>+(geP($T-=IK*1a-fJc$!ybt^^%TxeOhw5p~1^Nw3Vyz@En z+UDenDmkkQI5945os;LgU;sW&G`;)X|xL=*poTIRCC84jE)FM`!tzk5Pvv1%-W#q?hb;(aqWu*W$xahlwvYQh+Iu8>?31^@~ z991mQ9IrS7SXJGpt^P_jtTmbQf}@!)PN5W6v!Tgl*CEAdw_n2sB>h?n5bCeF0@j`( zJQI#zD9CkecTH-jh1G?cmZauNqO0-F+ zDY{Ngo0mh7z(zI#BiRI;!h$g14BHC5i49u%m{}j3W<}UZHxunko7u4B0>QIaLc-|E zGz^QQ1-a$X#Y1=2g140oZv&~ZZGO_FwIB{HD{?g<6fBh?GQ{*$_JKDV)(FQ@+LM$F z+UHG~hRQQcO=>y>0Rn6*u#50#!Gws{u}q_m);-KdvV3=VbvgwMYvG`RDH{0F21AFi zH^Jd5P7Y&TICxcIP=X>sV1$#vmUIHCIgxFgz&o>mtBwt8-h?xDh5|-8Y3xp?Azf^j z9MKpjk!^WIQq{4Y6MjP;eBuN&&I!6T9h4HJkjM0en=t#UIg!_=BfFTiOJMHc#N3>Y znYP3yIB_$i>^qCXR^B%Dusb=CGdygq3~`_Zse=qJzl)Q@#&oIIAcm^8*w=1O)OCn@ zP^cYF=Fm+W!cnuR$><5UvuijZX`*eAUSJr)^X3C|rhed3Hu>6Rp+?oCE<1fRtP$>^ z0OXh|I?XwgCyt_UR1%3ItqR2IX4Qhqf)zkQ$u;egwkYb93E0ENw)}8-s1GiXru0qP z`*ny!Qkd=KWG3@Ly-6u%776J(PAU`Kg=9Nv0=b@(#o_KOJg^fmnig?royx+Mb^|B1 zow?LdCsiF%b|9n@oz#R~f^;;}xRH~_?%rr5GD0ztn>dN&8;q|PKuI7Bb?9*1W~OK# zCkuIA&00Z{1<4}HQ|;$OAI()p0zJ+qO`bV?C3>2^r56O{1gDmTPa%T|1$OCWQ7H-h!y(z94Q^|v+=2^ zHmTsC=#yrXjZe)XQnN6m5$?WaDpCH&%q=8!%+or>)Lkgf^qK!OsF6vnEU<8sX+!aAfO*eX}BD2O@q2n{S2s5O3+zyPA%)H{Gj4JmY}fV4zPaLt8$5?a(V`6w=2iV_+p z31v_adP#@s1P~|$^YfrB&pb^up2baLB9{g_5NPW4JROQwzy=K3^#d~LXLB>zmCK~& z1XhtWlOmPpaC6y{$E9MP62+!eFe#8w5(QJw<>oV%$A^*>P}3;G?gFNQn+#g)?c5}` z_d+5vmWah2+$^^C!oq_FSCoOqjXs=WF(VGMreAH6@FV+`1YXGWyRmr6+a3`JyzT;*|;5nsLzU=E%El?55q4E z!~<6RY%kP~-wZyNSP5i%fc&tt#>!$M(*U{zoTpf6$c`K$Wys<{CjojZO@o#uOYIoQ zrd zV!!*0VM8ylFqbgli%%CI_C3tlBMH-^(@eKh-iw(DP@kp}e1bswS>`3-wSApseDEq- z^UZ=w@UJTW-u$EyPN4{9EXZoliBtYQc;Ns$u?-!|8Z=Dcg&Hgw8E^+0OWo>9J-kF= zK-Wf7H!lsZ>z|#f^ilz3MLR+DVIv%wls$q-Rg#MOD#1DXP=bN>UA&Sj3tnI+FIy&; zg@@2BZ|i_{#mNAr2+wF}^A_HI(EKu7$|*Y*?Oyk?;0|@*LTVqpAlyC2-6)_`k z*RgOr(i1l*iL??>Dgv&`Tq>~Bpu}F!!s^&^SvA!e@p=Obubc%N*)Ez+e8siF0(*V7 zIN%8#yp;voX?^ygtxMs479M+YctrO?<$mC44z#zr*uxU9<2SO9*gFUbr5&`;2Uu9- ztm(D|jV`sS(s!dSex>uxLrYg@xAmj5f{FlLN^(TLv4g1u=LaxBHb64YuGMe~@`fcI4NI`$jE zmVkI~W5Ju4l^v3Cm>xxPnQlZ-%gWnX;IHW)K6V}thBppydU0$ca9xln9bi_W`jRIs zuzU46G^I^Oe=+D@zf=btaRagoXoIgnM-!bQ-m!cx3;&e|m~>2hCjnfj3tcG~6TrCw zmIy(=lM8xx#&px6wUV%fi)T)>#&>a%xM@HVnrAt+s44#4Tx@pr$0o_)JzN~N^~S-4 z`Yt>hiOP5{7j%ynp`cqW;vp^)Jz7Kui581^9~YY*Eh1u*w21c)$U*hnXZkDkIn?;U 
zQq*4#31}`X2%R3~kbVn}j2`sbfqxHdgSM;>ET7QA1&_f>b}q1dy3lBHqpZLOxv+fFpc`{RF$D?t32>Sp;X>S=ix}AA6g+(a zn;=ZrgC`-R1NtZz`qmgdj!+6vSZD@f==4K-ta3pMSL>QF3Cr{KFNi;Ax*2c<7+0CXz(c^8B{&Y z1(@&=u|h$h*bQ!-o`AD*i+Df9g^T-V$89NHAiBuiq zHKc?^8QLI$5w&^mI=UKwll?3gY+?(HH67SqP^~)fJRHdUq=h$}qm}eI zE?|t(I#i7>TBoWT=lppt=!q%$X!f*SoWYt(Jn`r(7db3+oA8>35BGtQ!56p~3}-TE zB107hF3I@xJk%GtaEH3%g4cAcC0bEm;(}dGWxrU=s`Ter@K}o4i@w}vYpD;%P+X9{ z>rm=WnG(f($?n&TagA^c><_;=%?wo~+=J9YMaP;T6a-QKU*RLUa17(XUP#D0)G>*# za+4VEE(za(`|eJuo{+`YxM`p#%2EjIJXnF$)b>ut_^)#l*wF(4y2DRzanLhEA~vhY_5hd!kq<|)3>;39%i8#(fc+xz5RpGgM$$@ zvf_0`L8+tfaPzux5MHjRKz*WVa^K}9g_ps*xr6rANViMXf$n?Ugk}dJ1OW~Gng$CV z2$5$#N=usaS_uve^dQCexrrVagecW8nhR7wz=tdJ2i)B9_L|7at8F)NIR7Cxjfor) z)pXORHWbA}FS&xwSSXMmaWmO72ow1{1M>MXHy?LEJ{CM;QP7P=thz)0kXy&3JD9l? zq6hUKD9Kc?BNl&aNy~D;dHjTX9(g`3Q^T5DH8jg{7UeDDpK_DBejwp`i)OW|idaA6 zrZhJYr5@bc&$*dyp}S&mk^oV}M+v&5)luvFMR-I1m9FUMCEs6Ii6@6-p{w~Z)VXGD zbGqa37!A;pN4H-to5{j`cv9Gu51BZ%{fY^3Bo)!K&1O2U?R5OeK)_M&9i7}dg&utj zZR}vFQG-U@$OlT2*?P5I;F&NQg^uOd%OR1cCpN>TmN9D6+7sE|@D}@5WKl1a~nae}k1h#KtEv{89f8r;zs~0kmCB%wg8=F7#vzZ)>4c_v{ z%6{{3_7{F?qq)@Jp-_OYL7P4NDh>$y=(mjgm7m4ld=~Kyu#AQMH-1WO3p?R;Q$5|^ z`5EkJ6-t6ZzL)z4Kb6gyRLboh?w|bVBl+l^-tAxf9L6&_ Date: Mon, 24 Jan 2022 14:42:08 -0600 Subject: [PATCH 008/551] Merge pull request #1117 from datastax/remove_unittest2 PYTHON-1289 Removing unittest2 from the dependency list --- test-requirements.txt | 1 - tests/__init__.py | 5 +---- tests/integration/__init__.py | 5 +---- tests/integration/advanced/__init__.py | 5 +---- tests/integration/advanced/graph/fluent/__init__.py | 5 +---- tests/integration/advanced/graph/fluent/test_graph.py | 5 +---- tests/integration/advanced/graph/test_graph_datatype.py | 5 +---- tests/integration/advanced/graph/test_graph_query.py | 5 +---- tests/integration/advanced/test_adv_metadata.py | 5 +---- tests/integration/advanced/test_auth.py | 5 +---- tests/integration/advanced/test_cont_paging.py | 5 +---- .../integration/advanced/test_cqlengine_where_operators.py | 5 +---- tests/integration/advanced/test_geometry.py | 5 +---- tests/integration/advanced/test_unixsocketendpoint.py | 5 +---- tests/integration/cloud/__init__.py | 5 +---- tests/integration/cloud/test_cloud.py | 5 +---- tests/integration/cqlengine/__init__.py | 5 +---- tests/integration/cqlengine/advanced/test_cont_paging.py | 5 +---- tests/integration/cqlengine/base.py | 5 +---- tests/integration/cqlengine/columns/test_static_column.py | 5 +---- tests/integration/cqlengine/columns/test_validation.py | 5 +---- tests/integration/cqlengine/columns/test_value_io.py | 5 +---- tests/integration/cqlengine/connections/test_connection.py | 5 +---- tests/integration/cqlengine/management/test_management.py | 5 +---- tests/integration/cqlengine/model/test_model.py | 5 +---- tests/integration/cqlengine/model/test_model_io.py | 5 +---- tests/integration/cqlengine/model/test_udts.py | 5 +---- .../cqlengine/operators/test_where_operators.py | 5 +---- tests/integration/cqlengine/query/test_named.py | 5 +---- tests/integration/cqlengine/query/test_queryset.py | 5 +---- .../cqlengine/statements/test_assignment_clauses.py | 5 +---- .../cqlengine/statements/test_base_statement.py | 5 +---- .../cqlengine/statements/test_insert_statement.py | 5 +---- .../cqlengine/statements/test_select_statement.py | 5 +---- .../cqlengine/statements/test_update_statement.py | 5 +---- .../integration/cqlengine/statements/test_where_clause.py | 5 +---- tests/integration/cqlengine/test_ifexists.py | 5 +---- tests/integration/cqlengine/test_ifnotexists.py | 5 +---- 
tests/integration/cqlengine/test_lwt_conditional.py | 5 +---- tests/integration/cqlengine/test_ttl.py | 5 +---- tests/integration/long/__init__.py | 5 +---- tests/integration/long/test_consistency.py | 5 +---- tests/integration/long/test_failure_types.py | 5 +---- tests/integration/long/test_ipv6.py | 5 +---- tests/integration/long/test_large_data.py | 5 +---- tests/integration/long/test_loadbalancingpolicies.py | 5 +---- tests/integration/long/test_policies.py | 5 +---- tests/integration/long/test_schema.py | 5 +---- tests/integration/long/test_ssl.py | 5 +---- tests/integration/simulacron/__init__.py | 5 +---- tests/integration/simulacron/advanced/test_insights.py | 5 +---- tests/integration/simulacron/test_cluster.py | 5 +---- tests/integration/simulacron/test_connection.py | 5 +---- tests/integration/simulacron/test_empty_column.py | 5 +---- tests/integration/simulacron/test_endpoint.py | 5 +---- tests/integration/simulacron/test_policies.py | 5 +---- tests/integration/standard/__init__.py | 5 +---- tests/integration/standard/test_authentication.py | 5 +---- tests/integration/standard/test_client_warnings.py | 5 +---- tests/integration/standard/test_cluster.py | 5 +---- tests/integration/standard/test_concurrent.py | 5 +---- tests/integration/standard/test_connection.py | 5 +---- tests/integration/standard/test_control_connection.py | 5 +---- tests/integration/standard/test_custom_cluster.py | 5 +---- tests/integration/standard/test_custom_payload.py | 5 +---- tests/integration/standard/test_custom_protocol_handler.py | 5 +---- .../integration/standard/test_cython_protocol_handlers.py | 5 +---- tests/integration/standard/test_dse.py | 5 +---- tests/integration/standard/test_metadata.py | 5 +---- tests/integration/standard/test_metrics.py | 5 +---- tests/integration/standard/test_policies.py | 5 +---- tests/integration/standard/test_prepared_statements.py | 5 +---- tests/integration/standard/test_query.py | 5 +---- tests/integration/standard/test_query_paging.py | 5 +---- tests/integration/standard/test_routing.py | 5 +---- tests/integration/standard/test_row_factories.py | 5 +---- tests/integration/standard/test_single_interface.py | 5 +---- tests/integration/standard/test_types.py | 5 +---- tests/integration/standard/test_udts.py | 5 +---- tests/integration/upgrade/__init__.py | 5 +---- tests/integration/upgrade/test_upgrade.py | 5 +---- tests/stress_tests/test_load.py | 5 +---- tests/stress_tests/test_multi_inserts.py | 5 +---- tests/unit/advanced/cloud/test_cloud.py | 5 +---- tests/unit/advanced/test_auth.py | 5 +---- tests/unit/advanced/test_execution_profile.py | 5 +---- tests/unit/advanced/test_geometry.py | 5 +---- tests/unit/advanced/test_graph.py | 5 +---- tests/unit/advanced/test_insights.py | 5 +---- tests/unit/advanced/test_metadata.py | 5 +---- tests/unit/advanced/test_policies.py | 5 +---- tests/unit/cqlengine/test_columns.py | 5 +---- tests/unit/cqlengine/test_connection.py | 5 +---- tests/unit/cqlengine/test_udt.py | 5 +---- tests/unit/cython/test_bytesio.py | 5 +---- tests/unit/cython/test_types.py | 5 +---- tests/unit/cython/test_utils.py | 7 ++----- tests/unit/cython/utils.py | 5 +---- tests/unit/io/test_asyncorereactor.py | 5 +---- tests/unit/io/test_eventletreactor.py | 5 +---- tests/unit/io/test_geventreactor.py | 5 +---- tests/unit/io/test_libevreactor.py | 5 +---- tests/unit/io/test_twistedreactor.py | 5 +---- tests/unit/io/utils.py | 5 +---- tests/unit/test_auth.py | 5 +---- tests/unit/test_cluster.py | 5 +---- tests/unit/test_concurrent.py | 5 +---- 
tests/unit/test_connection.py | 5 +---- tests/unit/test_control_connection.py | 5 +---- tests/unit/test_endpoints.py | 5 +---- tests/unit/test_exception.py | 5 +---- tests/unit/test_host_connection_pool.py | 5 +---- tests/unit/test_marshalling.py | 5 +---- tests/unit/test_metadata.py | 5 +---- tests/unit/test_orderedmap.py | 5 +---- tests/unit/test_parameter_binding.py | 5 +---- tests/unit/test_policies.py | 5 +---- tests/unit/test_protocol.py | 5 +---- tests/unit/test_query.py | 5 +---- tests/unit/test_response_future.py | 5 +---- tests/unit/test_resultset.py | 5 +---- tests/unit/test_row_factories.py | 5 +---- tests/unit/test_segment.py | 5 +---- tests/unit/test_sortedset.py | 5 +---- tests/unit/test_time_util.py | 5 +---- tests/unit/test_timestamps.py | 5 +---- tests/unit/test_types.py | 5 +---- tests/unit/test_util_types.py | 5 +---- 128 files changed, 128 insertions(+), 510 deletions(-) diff --git a/test-requirements.txt b/test-requirements.txt index 9e62bfdee8..996cf4341f 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -3,7 +3,6 @@ scales nose mock>1.1 ccm>=2.1.2 -unittest2 pytz sure pure-sasl diff --git a/tests/__init__.py b/tests/__init__.py index cea5a872c6..48c589c424 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import logging import sys import socket diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 70ec11c213..d3c3332649 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -18,10 +18,7 @@ from tests import connection_class, EVENT_LOOP_MANAGER Cluster.connection_class = connection_class -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from packaging.version import Version import logging diff --git a/tests/integration/advanced/__init__.py b/tests/integration/advanced/__init__.py index b2820e037b..e2fa1a4a4a 100644 --- a/tests/integration/advanced/__init__.py +++ b/tests/integration/advanced/__init__.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from six.moves.urllib.request import build_opener, Request, HTTPHandler import re diff --git a/tests/integration/advanced/graph/fluent/__init__.py b/tests/integration/advanced/graph/fluent/__init__.py index 3bb81e78e3..3962029f45 100644 --- a/tests/integration/advanced/graph/fluent/__init__.py +++ b/tests/integration/advanced/graph/fluent/__init__.py @@ -35,10 +35,7 @@ VertexLabel) from tests.integration import requiredse -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import ipaddress diff --git a/tests/integration/advanced/graph/fluent/test_graph.py b/tests/integration/advanced/graph/fluent/test_graph.py index 02611c12c0..d46a74a146 100644 --- a/tests/integration/advanced/graph/fluent/test_graph.py +++ b/tests/integration/advanced/graph/fluent/test_graph.py @@ -28,10 +28,7 @@ from tests.integration.advanced.graph.fluent import ( BaseExplicitExecutionTest, create_traversal_profiles, check_equality_base) -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest @greaterthanorequaldse60 diff --git a/tests/integration/advanced/graph/test_graph_datatype.py b/tests/integration/advanced/graph/test_graph_datatype.py index 222b1f5ace..0445ce8030 100644 --- a/tests/integration/advanced/graph/test_graph_datatype.py +++ b/tests/integration/advanced/graph/test_graph_datatype.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import time import six diff --git a/tests/integration/advanced/graph/test_graph_query.py b/tests/integration/advanced/graph/test_graph_query.py index 0eda67894d..9bc23e611a 100644 --- a/tests/integration/advanced/graph/test_graph_query.py +++ b/tests/integration/advanced/graph/test_graph_query.py @@ -22,10 +22,7 @@ import json import time -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra import OperationTimedOut, ConsistencyLevel, InvalidRequest from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, NoHostAvailable diff --git a/tests/integration/advanced/test_adv_metadata.py b/tests/integration/advanced/test_adv_metadata.py index b3af6fa5d1..8228bfe220 100644 --- a/tests/integration/advanced/test_adv_metadata.py +++ b/tests/integration/advanced/test_adv_metadata.py @@ -20,10 +20,7 @@ greaterthanorequaldse68, use_single_node, DSE_VERSION, requiredse, TestCluster) -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import logging import time diff --git a/tests/integration/advanced/test_auth.py b/tests/integration/advanced/test_auth.py index 7e9aa8c23e..3443419ab4 100644 --- a/tests/integration/advanced/test_auth.py +++ b/tests/integration/advanced/test_auth.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import logging import os import subprocess diff --git a/tests/integration/advanced/test_cont_paging.py b/tests/integration/advanced/test_cont_paging.py index c5f1cbfff3..2e75d7061d 100644 --- a/tests/integration/advanced/test_cont_paging.py +++ b/tests/integration/advanced/test_cont_paging.py @@ -18,10 +18,7 @@ import logging log = logging.getLogger(__name__) -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from itertools import cycle, count from six.moves import range diff --git a/tests/integration/advanced/test_cqlengine_where_operators.py b/tests/integration/advanced/test_cqlengine_where_operators.py index 8ade3db09d..b2e4d4ba9e 100644 --- a/tests/integration/advanced/test_cqlengine_where_operators.py +++ b/tests/integration/advanced/test_cqlengine_where_operators.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import os import time diff --git a/tests/integration/advanced/test_geometry.py b/tests/integration/advanced/test_geometry.py index 8bee144d19..6a6737bd50 100644 --- a/tests/integration/advanced/test_geometry.py +++ b/tests/integration/advanced/test_geometry.py @@ -18,10 +18,7 @@ from cassandra.util import OrderedMap, sortedset from collections import namedtuple -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from uuid import uuid1 from cassandra.util import Point, LineString, Polygon from cassandra.cqltypes import LineStringType, PointType, PolygonType diff --git a/tests/integration/advanced/test_unixsocketendpoint.py b/tests/integration/advanced/test_unixsocketendpoint.py index 10cbc1b362..f2795d1a68 100644 --- a/tests/integration/advanced/test_unixsocketendpoint.py +++ b/tests/integration/advanced/test_unixsocketendpoint.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import time import subprocess diff --git a/tests/integration/cloud/__init__.py b/tests/integration/cloud/__init__.py index ca05ae4ce5..a6a4ab7a5d 100644 --- a/tests/integration/cloud/__init__.py +++ b/tests/integration/cloud/__init__.py @@ -13,10 +13,7 @@ # limitations under the License from cassandra.cluster import Cluster -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import os import subprocess diff --git a/tests/integration/cloud/test_cloud.py b/tests/integration/cloud/test_cloud.py index e0b9e2d382..ef4909a257 100644 --- a/tests/integration/cloud/test_cloud.py +++ b/tests/integration/cloud/test_cloud.py @@ -18,10 +18,7 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import six from ssl import SSLContext, PROTOCOL_TLS diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index e68baaabf1..cd8f031ed1 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -14,10 +14,7 @@ import os import warnings -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra import ConsistencyLevel from cassandra.cqlengine import connection diff --git a/tests/integration/cqlengine/advanced/test_cont_paging.py b/tests/integration/cqlengine/advanced/test_cont_paging.py index 38b4355312..89e05950e3 100644 --- a/tests/integration/cqlengine/advanced/test_cont_paging.py +++ b/tests/integration/cqlengine/advanced/test_cont_paging.py @@ -14,10 +14,7 @@ -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from packaging.version import Version diff --git a/tests/integration/cqlengine/base.py b/tests/integration/cqlengine/base.py index 8a6903350f..bdb62aa2a3 100644 --- a/tests/integration/cqlengine/base.py +++ b/tests/integration/cqlengine/base.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import sys diff --git a/tests/integration/cqlengine/columns/test_static_column.py b/tests/integration/cqlengine/columns/test_static_column.py index 69e222d2b9..0e8ace8c8f 100644 --- a/tests/integration/cqlengine/columns/test_static_column.py +++ b/tests/integration/cqlengine/columns/test_static_column.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from uuid import uuid4 diff --git a/tests/integration/cqlengine/columns/test_validation.py b/tests/integration/cqlengine/columns/test_validation.py index 69682fd68d..21fe1581ff 100644 --- a/tests/integration/cqlengine/columns/test_validation.py +++ b/tests/integration/cqlengine/columns/test_validation.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import sys from datetime import datetime, timedelta, date, tzinfo, time diff --git a/tests/integration/cqlengine/columns/test_value_io.py b/tests/integration/cqlengine/columns/test_value_io.py index 243c2b0fdb..2c82fe16f7 100644 --- a/tests/integration/cqlengine/columns/test_value_io.py +++ b/tests/integration/cqlengine/columns/test_value_io.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from datetime import datetime, timedelta, time from decimal import Decimal diff --git a/tests/integration/cqlengine/connections/test_connection.py b/tests/integration/cqlengine/connections/test_connection.py index c46df31280..92b6992573 100644 --- a/tests/integration/cqlengine/connections/test_connection.py +++ b/tests/integration/cqlengine/connections/test_connection.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra import ConsistencyLevel diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 7edb3e71dd..f37db5e51f 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import mock import logging diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index bbd9e0cbb6..859facf0e1 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from mock import patch diff --git a/tests/integration/cqlengine/model/test_model_io.py b/tests/integration/cqlengine/model/test_model_io.py index 32ace5363f..3c4088cc83 100644 --- a/tests/integration/cqlengine/model/test_model_io.py +++ b/tests/integration/cqlengine/model/test_model_io.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from uuid import uuid4, UUID import random diff --git a/tests/integration/cqlengine/model/test_udts.py b/tests/integration/cqlengine/model/test_udts.py index 82973436ac..1e3adf9a71 100644 --- a/tests/integration/cqlengine/model/test_udts.py +++ b/tests/integration/cqlengine/model/test_udts.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from datetime import datetime, date, time from decimal import Decimal diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index fdfce1f0b8..555af11025 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cqlengine.operators import * diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index 3a6f83b32e..eb85bbbb85 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra import ConsistencyLevel from cassandra.cqlengine import operators diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 6bc9d701b8..ec5044b707 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -13,10 +13,7 @@ # limitations under the License. from __future__ import absolute_import -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from datetime import datetime from uuid import uuid4 diff --git a/tests/integration/cqlengine/statements/test_assignment_clauses.py b/tests/integration/cqlengine/statements/test_assignment_clauses.py index 594224d72d..82bf067cb4 100644 --- a/tests/integration/cqlengine/statements/test_assignment_clauses.py +++ b/tests/integration/cqlengine/statements/test_assignment_clauses.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cqlengine.statements import AssignmentClause, SetUpdateClause, ListUpdateClause, MapUpdateClause, MapDeleteClause, FieldDeleteClause, CounterUpdateClause diff --git a/tests/integration/cqlengine/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py index 474c45d02b..3b5be60520 100644 --- a/tests/integration/cqlengine/statements/test_base_statement.py +++ b/tests/integration/cqlengine/statements/test_base_statement.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from uuid import uuid4 import six diff --git a/tests/integration/cqlengine/statements/test_insert_statement.py b/tests/integration/cqlengine/statements/test_insert_statement.py index 3bf90ec313..a1dcd08968 100644 --- a/tests/integration/cqlengine/statements/test_insert_statement.py +++ b/tests/integration/cqlengine/statements/test_insert_statement.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import six diff --git a/tests/integration/cqlengine/statements/test_select_statement.py b/tests/integration/cqlengine/statements/test_select_statement.py index 90c14bcfb6..c6d1ac69f4 100644 --- a/tests/integration/cqlengine/statements/test_select_statement.py +++ b/tests/integration/cqlengine/statements/test_select_statement.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cqlengine.columns import Column from cassandra.cqlengine.statements import SelectStatement, WhereClause diff --git a/tests/integration/cqlengine/statements/test_update_statement.py b/tests/integration/cqlengine/statements/test_update_statement.py index c6ed228d91..99105069dd 100644 --- a/tests/integration/cqlengine/statements/test_update_statement.py +++ b/tests/integration/cqlengine/statements/test_update_statement.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cqlengine.columns import Column, Set, List, Text from cassandra.cqlengine.operators import * diff --git a/tests/integration/cqlengine/statements/test_where_clause.py b/tests/integration/cqlengine/statements/test_where_clause.py index 3173320f7c..21671be086 100644 --- a/tests/integration/cqlengine/statements/test_where_clause.py +++ b/tests/integration/cqlengine/statements/test_where_clause.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import six from cassandra.cqlengine.operators import EqualsOperator diff --git a/tests/integration/cqlengine/test_ifexists.py b/tests/integration/cqlengine/test_ifexists.py index 2797edd846..1189bc0ff5 100644 --- a/tests/integration/cqlengine/test_ifexists.py +++ b/tests/integration/cqlengine/test_ifexists.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import mock from uuid import uuid4 diff --git a/tests/integration/cqlengine/test_ifnotexists.py b/tests/integration/cqlengine/test_ifnotexists.py index 206101f1b2..260e132731 100644 --- a/tests/integration/cqlengine/test_ifnotexists.py +++ b/tests/integration/cqlengine/test_ifnotexists.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import mock from uuid import uuid4 diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index 1c418ae6d8..f8459a95ad 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import mock import six diff --git a/tests/integration/cqlengine/test_ttl.py b/tests/integration/cqlengine/test_ttl.py index a9aa32db94..55457ff56a 100644 --- a/tests/integration/cqlengine/test_ttl.py +++ b/tests/integration/cqlengine/test_ttl.py @@ -13,10 +13,7 @@ # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from packaging.version import Version diff --git a/tests/integration/long/__init__.py b/tests/integration/long/__init__.py index 447f4885cc..19e7ed2c64 100644 --- a/tests/integration/long/__init__.py +++ b/tests/integration/long/__init__.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest try: from ccmlib import common diff --git a/tests/integration/long/test_consistency.py b/tests/integration/long/test_consistency.py index bbf446861a..0b9ebab3ed 100644 --- a/tests/integration/long/test_consistency.py +++ b/tests/integration/long/test_consistency.py @@ -28,10 +28,7 @@ force_stop, create_schema, wait_for_down, wait_for_up, start, CoordinatorStats ) -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest ALL_CONSISTENCY_LEVELS = { ConsistencyLevel.ANY, ConsistencyLevel.ONE, ConsistencyLevel.TWO, ConsistencyLevel.QUORUM, diff --git a/tests/integration/long/test_failure_types.py b/tests/integration/long/test_failure_types.py index 6bdff8d15d..2ca01066b0 100644 --- a/tests/integration/long/test_failure_types.py +++ b/tests/integration/long/test_failure_types.py @@ -34,10 +34,7 @@ local, CASSANDRA_VERSION, TestCluster) -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest log = logging.getLogger(__name__) diff --git a/tests/integration/long/test_ipv6.py b/tests/integration/long/test_ipv6.py index a49c1677e8..b63fdebcf3 100644 --- a/tests/integration/long/test_ipv6.py +++ b/tests/integration/long/test_ipv6.py @@ -30,10 +30,7 @@ except ImportError: LibevConnection = None -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest # If more modules do IPV6 testing, this can be moved down to integration.__init__. diff --git a/tests/integration/long/test_large_data.py b/tests/integration/long/test_large_data.py index ce7e4398da..59873204a4 100644 --- a/tests/integration/long/test_large_data.py +++ b/tests/integration/long/test_large_data.py @@ -27,10 +27,7 @@ from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster from tests.integration.long.utils import create_schema -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest log = logging.getLogger(__name__) diff --git a/tests/integration/long/test_loadbalancingpolicies.py b/tests/integration/long/test_loadbalancingpolicies.py index f245569a80..7848a21b1d 100644 --- a/tests/integration/long/test_loadbalancingpolicies.py +++ b/tests/integration/long/test_loadbalancingpolicies.py @@ -36,10 +36,7 @@ wait_for_down, decommission, start, bootstrap, stop, IP_FORMAT) -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest log = logging.getLogger(__name__) diff --git a/tests/integration/long/test_policies.py b/tests/integration/long/test_policies.py index 0648e6cc93..680d0d7980 100644 --- a/tests/integration/long/test_policies.py +++ b/tests/integration/long/test_policies.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra import ConsistencyLevel, Unavailable from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT diff --git a/tests/integration/long/test_schema.py b/tests/integration/long/test_schema.py index e2945a117b..f1cc80a17a 100644 --- a/tests/integration/long/test_schema.py +++ b/tests/integration/long/test_schema.py @@ -21,10 +21,7 @@ import time -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest log = logging.getLogger(__name__) diff --git a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index 4de46f4649..69285001f8 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest import os, sys, traceback, logging, ssl, time, math, uuid from cassandra.cluster import NoHostAvailable diff --git a/tests/integration/simulacron/__init__.py b/tests/integration/simulacron/__init__.py index 6543265db2..c959fd6e08 100644 --- a/tests/integration/simulacron/__init__.py +++ b/tests/integration/simulacron/__init__.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from tests.integration import requiredse, CASSANDRA_VERSION, DSE_VERSION, SIMULACRON_JAR, PROTOCOL_VERSION from tests.integration.simulacron.utils import ( diff --git a/tests/integration/simulacron/advanced/test_insights.py b/tests/integration/simulacron/advanced/test_insights.py index 3da14659af..5ddae4ec7c 100644 --- a/tests/integration/simulacron/advanced/test_insights.py +++ b/tests/integration/simulacron/advanced/test_insights.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import time import json diff --git a/tests/integration/simulacron/test_cluster.py b/tests/integration/simulacron/test_cluster.py index b89f564f08..f859a5dd05 100644 --- a/tests/integration/simulacron/test_cluster.py +++ b/tests/integration/simulacron/test_cluster.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import logging from packaging.version import Version diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 4ef97247a6..0c70d0a1e9 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import logging import time diff --git a/tests/integration/simulacron/test_empty_column.py b/tests/integration/simulacron/test_empty_column.py index 91c76985e1..046aaacf79 100644 --- a/tests/integration/simulacron/test_empty_column.py +++ b/tests/integration/simulacron/test_empty_column.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from collections import namedtuple, OrderedDict diff --git a/tests/integration/simulacron/test_endpoint.py b/tests/integration/simulacron/test_endpoint.py index 691fcc8718..9e2d91b6d3 100644 --- a/tests/integration/simulacron/test_endpoint.py +++ b/tests/integration/simulacron/test_endpoint.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from functools import total_ordering diff --git a/tests/integration/simulacron/test_policies.py b/tests/integration/simulacron/test_policies.py index da093be43c..6d0d081889 100644 --- a/tests/integration/simulacron/test_policies.py +++ b/tests/integration/simulacron/test_policies.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra import OperationTimedOut, WriteTimeout from cassandra.cluster import Cluster, ExecutionProfile, ResponseFuture, EXEC_PROFILE_DEFAULT, NoHostAvailable diff --git a/tests/integration/standard/__init__.py b/tests/integration/standard/__init__.py index e54b6fd6bd..1f14bd6ec4 100644 --- a/tests/integration/standard/__init__.py +++ b/tests/integration/standard/__init__.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest try: from ccmlib import common diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 9755c5098b..189da45c94 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -22,10 +22,7 @@ USE_CASS_EXTERNAL, start_cluster_wait_for_up, TestCluster from tests.integration.util import assert_quiescent_pool_state -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest log = logging.getLogger(__name__) diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index c5ce5dc726..5f63b5265a 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -13,10 +13,7 @@ # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest from cassandra.query import BatchStatement diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index c7d8266fd9..a15c7f32e2 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from collections import deque from copy import copy diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index 8bd65c7f6f..ad4ef47473 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -26,10 +26,7 @@ from six import next -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest log = logging.getLogger(__name__) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index aaa5a27dfd..76c8216d41 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from functools import partial from mock import patch diff --git a/tests/integration/standard/test_control_connection.py b/tests/integration/standard/test_control_connection.py index db7cff8506..9d579476d2 100644 --- a/tests/integration/standard/test_control_connection.py +++ b/tests/integration/standard/test_control_connection.py @@ -16,10 +16,7 @@ # from cassandra import InvalidRequest -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.protocol import ConfigurationException diff --git a/tests/integration/standard/test_custom_cluster.py b/tests/integration/standard/test_custom_cluster.py index 84e0737086..d0f10d51db 100644 --- a/tests/integration/standard/test_custom_cluster.py +++ b/tests/integration/standard/test_custom_cluster.py @@ -16,10 +16,7 @@ from tests.integration import use_singledc, get_cluster, remove_cluster, local, TestCluster from tests.util import wait_until, wait_until_not_raised -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest def setup_module(): diff --git a/tests/integration/standard/test_custom_payload.py b/tests/integration/standard/test_custom_payload.py index 9906a8243e..3290852862 100644 --- a/tests/integration/standard/test_custom_payload.py +++ b/tests/integration/standard/test_custom_payload.py @@ -13,10 +13,7 @@ # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest import six diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index bf549511c8..7443ce0748 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.protocol import ProtocolHandler, ResultMessage, QueryMessage, UUIDType, read_int from cassandra.query import tuple_factory, SimpleStatement diff --git a/tests/integration/standard/test_cython_protocol_handlers.py b/tests/integration/standard/test_cython_protocol_handlers.py index 4e45553be2..9cb5914f16 100644 --- a/tests/integration/standard/test_cython_protocol_handlers.py +++ b/tests/integration/standard/test_cython_protocol_handlers.py @@ -2,10 +2,7 @@ # Based on test_custom_protocol_handler.py -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest from itertools import count diff --git a/tests/integration/standard/test_dse.py b/tests/integration/standard/test_dse.py index 1b9b5bef84..7b96094b3f 100644 --- a/tests/integration/standard/test_dse.py +++ b/tests/integration/standard/test_dse.py @@ -21,10 +21,7 @@ from tests.integration import (execute_until_pass, execute_with_long_wait_retry, use_cluster, TestCluster) -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest CCM_IS_DSE = (os.environ.get('CCM_IS_DSE', None) == 'true') diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index bd556f357d..e20f1f0640 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from collections import defaultdict import difflib diff --git a/tests/integration/standard/test_metrics.py b/tests/integration/standard/test_metrics.py index 676a5340ef..ddc1091dc6 100644 --- a/tests/integration/standard/test_metrics.py +++ b/tests/integration/standard/test_metrics.py @@ -17,10 +17,7 @@ from cassandra.connection import ConnectionShutdown from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, FallthroughRetryPolicy -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.query import SimpleStatement from cassandra import ConsistencyLevel, WriteTimeout, Unavailable, ReadTimeout diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 24facf42a0..46e91918ac 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, SimpleConvictionPolicy, \ diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 5c79f27346..1ed48d2964 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -15,10 +15,7 @@ from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra import InvalidRequest, DriverException from cassandra import ConsistencyLevel, ProtocolVersion diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index ea0e326ff5..8d2a3d74e2 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -15,10 +15,7 @@ from cassandra.concurrent import execute_concurrent from cassandra import DriverException -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import logging from cassandra import ProtocolVersion from cassandra import ConsistencyLevel, Unavailable, InvalidRequest, cluster diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index dac4ec5ce3..8e0ca8becc 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -16,10 +16,7 @@ import logging log = logging.getLogger(__name__) -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from itertools import cycle, count from six.moves import range diff --git a/tests/integration/standard/test_routing.py b/tests/integration/standard/test_routing.py index e1dabba49a..47697ee9c8 100644 --- a/tests/integration/standard/test_routing.py +++ b/tests/integration/standard/test_routing.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from uuid import uuid1 diff --git a/tests/integration/standard/test_row_factories.py b/tests/integration/standard/test_row_factories.py index 93f25d9276..6855e8a410 100644 --- a/tests/integration/standard/test_row_factories.py +++ b/tests/integration/standard/test_row_factories.py @@ -15,10 +15,7 @@ from tests.integration import get_server_versions, use_singledc, \ BasicSharedKeyspaceUnitTestCaseWFunctionTable, BasicSharedKeyspaceUnitTestCase, execute_until_pass, TestCluster -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cluster import ResultSet, ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.query import tuple_factory, named_tuple_factory, dict_factory, ordered_dict_factory diff --git a/tests/integration/standard/test_single_interface.py b/tests/integration/standard/test_single_interface.py index 91451a52a0..4677eff641 100644 --- a/tests/integration/standard/test_single_interface.py +++ b/tests/integration/standard/test_single_interface.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import six diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 0592b7d737..f69e88c64f 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from datetime import datetime import math diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 6d9676f25e..4c7826fb98 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from collections import namedtuple from functools import partial diff --git a/tests/integration/upgrade/__init__.py b/tests/integration/upgrade/__init__.py index d2b9076bc2..e307a3e3cc 100644 --- a/tests/integration/upgrade/__init__.py +++ b/tests/integration/upgrade/__init__.py @@ -27,10 +27,7 @@ import time import logging -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest def setup_module(): diff --git a/tests/integration/upgrade/test_upgrade.py b/tests/integration/upgrade/test_upgrade.py index 31df55c02c..63e1a64b9d 100644 --- a/tests/integration/upgrade/test_upgrade.py +++ b/tests/integration/upgrade/test_upgrade.py @@ -20,10 +20,7 @@ from cassandra.policies import ConstantSpeculativeExecutionPolicy from tests.integration.upgrade import UpgradeBase, UpgradeBaseAuth, UpgradePath, upgrade_paths -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest # Previous Cassandra upgrade diff --git a/tests/stress_tests/test_load.py b/tests/stress_tests/test_load.py index a9771147ce..3492ff2923 100644 --- a/tests/stress_tests/test_load.py +++ b/tests/stress_tests/test_load.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import gc diff --git a/tests/stress_tests/test_multi_inserts.py b/tests/stress_tests/test_multi_inserts.py index 65bbe2a4e4..84dfc5e6f7 100644 --- a/tests/stress_tests/test_multi_inserts.py +++ b/tests/stress_tests/test_multi_inserts.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest import os from cassandra.cluster import Cluster diff --git a/tests/unit/advanced/cloud/test_cloud.py b/tests/unit/advanced/cloud/test_cloud.py index ab18f0af72..a7cd83a8ce 100644 --- a/tests/unit/advanced/cloud/test_cloud.py +++ b/tests/unit/advanced/cloud/test_cloud.py @@ -11,10 +11,7 @@ import shutil import six -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra import DriverException from cassandra.datastax import cloud diff --git a/tests/unit/advanced/test_auth.py b/tests/unit/advanced/test_auth.py index bb411afe2b..840073e9e1 100644 --- a/tests/unit/advanced/test_auth.py +++ b/tests/unit/advanced/test_auth.py @@ -15,10 +15,7 @@ import os from puresasl import QOP -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.auth import DSEGSSAPIAuthProvider diff --git a/tests/unit/advanced/test_execution_profile.py b/tests/unit/advanced/test_execution_profile.py index 8592f56a44..478322f95b 100644 --- a/tests/unit/advanced/test_execution_profile.py +++ b/tests/unit/advanced/test_execution_profile.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cluster import GraphExecutionProfile, GraphAnalyticsExecutionProfile from cassandra.graph import GraphOptions diff --git a/tests/unit/advanced/test_geometry.py b/tests/unit/advanced/test_geometry.py index 4fa2644ff2..d85f1bc293 100644 --- a/tests/unit/advanced/test_geometry.py +++ b/tests/unit/advanced/test_geometry.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import struct import math diff --git a/tests/unit/advanced/test_graph.py b/tests/unit/advanced/test_graph.py index f25a229f42..25dd289dba 100644 --- a/tests/unit/advanced/test_graph.py +++ b/tests/unit/advanced/test_graph.py @@ -15,10 +15,7 @@ import warnings import json -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import six diff --git a/tests/unit/advanced/test_insights.py b/tests/unit/advanced/test_insights.py index 2cc170e485..4f1dd7ac12 100644 --- a/tests/unit/advanced/test_insights.py +++ b/tests/unit/advanced/test_insights.py @@ -13,10 +13,7 @@ # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import logging from mock import sentinel diff --git a/tests/unit/advanced/test_metadata.py b/tests/unit/advanced/test_metadata.py index addd514169..cf730ebec5 100644 --- a/tests/unit/advanced/test_metadata.py +++ b/tests/unit/advanced/test_metadata.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.metadata import ( KeyspaceMetadata, TableMetadataDSE68, diff --git a/tests/unit/advanced/test_policies.py b/tests/unit/advanced/test_policies.py index 79e7410799..b8e4a4e757 100644 --- a/tests/unit/advanced/test_policies.py +++ b/tests/unit/advanced/test_policies.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from mock import Mock diff --git a/tests/unit/cqlengine/test_columns.py b/tests/unit/cqlengine/test_columns.py index bcb174a8c0..a7bf74ec23 100644 --- a/tests/unit/cqlengine/test_columns.py +++ b/tests/unit/cqlengine/test_columns.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cqlengine.columns import Column diff --git a/tests/unit/cqlengine/test_connection.py b/tests/unit/cqlengine/test_connection.py index 9f8e500c6b..8e3a0b75bd 100644 --- a/tests/unit/cqlengine/test_connection.py +++ b/tests/unit/cqlengine/test_connection.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cluster import _ConfigMode from cassandra.cqlengine import connection diff --git a/tests/unit/cqlengine/test_udt.py b/tests/unit/cqlengine/test_udt.py index ebe1139fd0..0a126513d5 100644 --- a/tests/unit/cqlengine/test_udt.py +++ b/tests/unit/cqlengine/test_udt.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.cqlengine import columns from cassandra.cqlengine.models import Model diff --git a/tests/unit/cython/test_bytesio.py b/tests/unit/cython/test_bytesio.py index a156fc1272..cd4ea86f52 100644 --- a/tests/unit/cython/test_bytesio.py +++ b/tests/unit/cython/test_bytesio.py @@ -15,10 +15,7 @@ from tests.unit.cython.utils import cyimport, cythontest bytesio_testhelper = cyimport('tests.unit.cython.bytesio_testhelper') -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest class BytesIOTest(unittest.TestCase): diff --git a/tests/unit/cython/test_types.py b/tests/unit/cython/test_types.py index a0d2138c6d..545b82fc11 100644 --- a/tests/unit/cython/test_types.py +++ b/tests/unit/cython/test_types.py @@ -15,10 +15,7 @@ from tests.unit.cython.utils import cyimport, cythontest types_testhelper = cyimport('tests.unit.cython.types_testhelper') -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest class TypesTest(unittest.TestCase): diff --git a/tests/unit/cython/test_utils.py b/tests/unit/cython/test_utils.py index dc8745e471..0e79c235d8 100644 --- a/tests/unit/cython/test_utils.py +++ b/tests/unit/cython/test_utils.py @@ -15,10 +15,7 @@ from tests.unit.cython.utils import cyimport, cythontest utils_testhelper = cyimport('tests.unit.cython.utils_testhelper') -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest class UtilsTest(unittest.TestCase): @@ -26,4 +23,4 @@ class UtilsTest(unittest.TestCase): @cythontest def test_datetime_from_timestamp(self): - utils_testhelper.test_datetime_from_timestamp(self.assertEqual) \ No newline at end of file + utils_testhelper.test_datetime_from_timestamp(self.assertEqual) diff --git a/tests/unit/cython/utils.py b/tests/unit/cython/utils.py index 7f8be22ce0..fc21597c7d 100644 --- a/tests/unit/cython/utils.py +++ b/tests/unit/cython/utils.py @@ -18,10 +18,7 @@ except ImportError: VERIFY_CYTHON = False -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest def cyimport(import_path): """ diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 4e0e540327..6f493896d0 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from mock import patch import socket diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index ce828cd6d8..e2b6a533a8 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -13,10 +13,7 @@ # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from tests.unit.io.utils import TimerTestMixin from tests import notpypy, EVENT_LOOP_MANAGER diff --git a/tests/unit/io/test_geventreactor.py b/tests/unit/io/test_geventreactor.py index ec64ce34c1..466b9ae5d5 100644 --- a/tests/unit/io/test_geventreactor.py +++ b/tests/unit/io/test_geventreactor.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from tests.unit.io.utils import TimerTestMixin diff --git a/tests/unit/io/test_libevreactor.py b/tests/unit/io/test_libevreactor.py index a02458edc8..67ab5fc7d6 100644 --- a/tests/unit/io/test_libevreactor.py +++ b/tests/unit/io/test_libevreactor.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from mock import patch, Mock import weakref diff --git a/tests/unit/io/test_twistedreactor.py b/tests/unit/io/test_twistedreactor.py index e7c34cb4b5..b426a820c4 100644 --- a/tests/unit/io/test_twistedreactor.py +++ b/tests/unit/io/test_twistedreactor.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest from mock import Mock, patch from cassandra.connection import DefaultEndPoint diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 848513f031..ddfa2c3198 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -37,10 +37,7 @@ from socket import error as socket_error import ssl -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import time diff --git a/tests/unit/test_auth.py b/tests/unit/test_auth.py index 7b4196f831..68cce526e7 100644 --- a/tests/unit/test_auth.py +++ b/tests/unit/test_auth.py @@ -16,10 +16,7 @@ import six from cassandra.auth import PlainTextAuthenticator -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest class TestPlainTextAuthenticator(unittest.TestCase): diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 620f642084..6755f118fd 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import logging import six diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index cc6c12cdaa..9f67531a3c 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -13,10 +13,7 @@ # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from itertools import cycle from mock import Mock diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 21b8862772..f06b67ebe0 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from mock import Mock, ANY, call, patch import six diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index efad1ca5c9..276b2849ca 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import six diff --git a/tests/unit/test_endpoints.py b/tests/unit/test_endpoints.py index 2452e267ba..18f245e64b 100644 --- a/tests/unit/test_endpoints.py +++ b/tests/unit/test_endpoints.py @@ -6,10 +6,7 @@ # You may obtain a copy of the License at # # http://www.datastax.com/terms/datastax-dse-driver-license-terms -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import itertools diff --git a/tests/unit/test_exception.py b/tests/unit/test_exception.py index 3a082f7363..b39b22239c 100644 --- a/tests/unit/test_exception.py +++ b/tests/unit/test_exception.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest +import unittest from cassandra import Unavailable, Timeout, ConsistencyLevel import re diff --git a/tests/unit/test_host_connection_pool.py b/tests/unit/test_host_connection_pool.py index bda48dc76b..86d4bf9843 100644 --- a/tests/unit/test_host_connection_pool.py +++ b/tests/unit/test_host_connection_pool.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from mock import Mock, NonCallableMagicMock from threading import Thread, Event, Lock diff --git a/tests/unit/test_marshalling.py b/tests/unit/test_marshalling.py index c2363e0adc..1fdbfa6a4b 100644 --- a/tests/unit/test_marshalling.py +++ b/tests/unit/test_marshalling.py @@ -15,10 +15,7 @@ from cassandra import ProtocolVersion -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import platform from datetime import datetime, date diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index b2143f8c20..b0a8b63b16 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from binascii import unhexlify import logging diff --git a/tests/unit/test_orderedmap.py b/tests/unit/test_orderedmap.py index f2baab40f0..9ca5699204 100644 --- a/tests/unit/test_orderedmap.py +++ b/tests/unit/test_orderedmap.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.util import OrderedMap, OrderedMapSerializedKey from cassandra.cqltypes import EMPTY, UTF8Type, lookup_casstype diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index 228f3f4432..8820114dc3 100644 --- a/tests/unit/test_parameter_binding.py +++ b/tests/unit/test_parameter_binding.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.encoder import Encoder from cassandra.protocol import ColumnMetadata diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 5c0c11281b..a31b4f4c1b 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from itertools import islice, cycle from mock import Mock, patch, call diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index b43b21eeff..95a7a12b11 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from mock import Mock diff --git a/tests/unit/test_query.py b/tests/unit/test_query.py index 7c2bfc0d14..2a2901aaff 100644 --- a/tests/unit/test_query.py +++ b/tests/unit/test_query.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import six diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index f76a2d677b..dbd8764ad9 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from collections import deque from threading import RLock diff --git a/tests/unit/test_resultset.py b/tests/unit/test_resultset.py index b37c3a2594..97002d90d7 100644 --- a/tests/unit/test_resultset.py +++ b/tests/unit/test_resultset.py @@ -13,10 +13,7 @@ # limitations under the License. 
from cassandra.query import named_tuple_factory, dict_factory, tuple_factory -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from mock import Mock, PropertyMock, patch diff --git a/tests/unit/test_row_factories.py b/tests/unit/test_row_factories.py index 13049ba034..70691ad8fd 100644 --- a/tests/unit/test_row_factories.py +++ b/tests/unit/test_row_factories.py @@ -20,10 +20,7 @@ import sys -try: - from unittest import TestCase -except ImportError: - from unittest2 import TestCase +from unittest import TestCase log = logging.getLogger(__name__) diff --git a/tests/unit/test_segment.py b/tests/unit/test_segment.py index fc49339d68..f794b38b1d 100644 --- a/tests/unit/test_segment.py +++ b/tests/unit/test_segment.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import six diff --git a/tests/unit/test_sortedset.py b/tests/unit/test_sortedset.py index 3845c2c31c..49c3658df8 100644 --- a/tests/unit/test_sortedset.py +++ b/tests/unit/test_sortedset.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra.util import sortedset from cassandra.cqltypes import EMPTY diff --git a/tests/unit/test_time_util.py b/tests/unit/test_time_util.py index 7025f151d6..2605992d1c 100644 --- a/tests/unit/test_time_util.py +++ b/tests/unit/test_time_util.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest from cassandra import marshal from cassandra import util diff --git a/tests/unit/test_timestamps.py b/tests/unit/test_timestamps.py index 8903fbc99b..58958cff03 100644 --- a/tests/unit/test_timestamps.py +++ b/tests/unit/test_timestamps.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import mock diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index 562fd2c899..af3b327ef8 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import datetime import tempfile diff --git a/tests/unit/test_util_types.py b/tests/unit/test_util_types.py index b7dc837249..5d6058b394 100644 --- a/tests/unit/test_util_types.py +++ b/tests/unit/test_util_types.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-try: - import unittest2 as unittest -except ImportError: - import unittest # noqa +import unittest import datetime From e4e290fe2dc5f15f677f33bbb40ed115fce2406d Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 2 Feb 2022 10:21:02 -0600 Subject: [PATCH 009/551] Merge pull request #1119 from datastax/python-1290 PYTHON-1290 Convert asyncio reactor away from @asyncio.coroutine --- cassandra/io/asyncioreactor.py | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/cassandra/io/asyncioreactor.py b/cassandra/io/asyncioreactor.py index 7cb0444a32..ab0e90ae09 100644 --- a/cassandra/io/asyncioreactor.py +++ b/cassandra/io/asyncioreactor.py @@ -46,9 +46,8 @@ def __init__(self, timeout, callback, loop): self._handle = asyncio.run_coroutine_threadsafe(delayed, loop=loop) @staticmethod - @asyncio.coroutine - def _call_delayed_coro(timeout, callback, loop): - yield from asyncio.sleep(timeout, loop=loop) + async def _call_delayed_coro(timeout, callback, loop): + await asyncio.sleep(timeout, loop=loop) return callback() def __lt__(self, other): @@ -136,8 +135,7 @@ def close(self): self._close(), loop=self._loop ) - @asyncio.coroutine - def _close(self): + async def _close(self): log.debug("Closing connection (%s) to %s" % (id(self), self.endpoint)) if self._write_watcher: self._write_watcher.cancel() @@ -174,21 +172,19 @@ def push(self, data): # avoid races/hangs by just scheduling this, not using threadsafe self._loop.create_task(self._push_msg(chunks)) - @asyncio.coroutine - def _push_msg(self, chunks): + async def _push_msg(self, chunks): # This lock ensures all chunks of a message are sequential in the Queue - with (yield from self._write_queue_lock): + with await self._write_queue_lock: for chunk in chunks: self._write_queue.put_nowait(chunk) - @asyncio.coroutine - def handle_write(self): + async def handle_write(self): while True: try: - next_msg = yield from self._write_queue.get() if next_msg: - yield from self._loop.sock_sendall(self._socket, next_msg) + next_msg = await self._write_queue.get() if next_msg: + await self._loop.sock_sendall(self._socket, next_msg) except socket.error as err: log.debug("Exception in send for %s: %s", self, err) self.defunct(err) @@ -196,18 +192,19 @@ def handle_write(self): except asyncio.CancelledError: return - @asyncio.coroutine - def handle_read(self): + async def handle_read(self): while True: try: - buf = yield from self._loop.sock_recv(self._socket, self.in_buffer_size) + buf = await self._loop.sock_recv(self._socket, self.in_buffer_size) self._iobuf.write(buf) # sock_recv expects EWOULDBLOCK if socket provides no data, but # nonblocking ssl sockets raise these instead, so we handle them # ourselves by yielding to the event loop, where the socket will # get the reading/writing it "wants" before retrying except (ssl.SSLWantWriteError, ssl.SSLWantReadError): - yield + # Apparently the preferred way to yield to the event loop from within + # a native coroutine based on https://github.com/python/asyncio/issues/284 + await asyncio.sleep(0) continue except socket.error as err: log.debug("Exception during socket recv for %s: %s", From a2ba70f21e8702fcb184c517fd5c97fe9f40b2cd Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Mon, 7 Mar 2022 12:22:37 +0200 Subject: [PATCH 010/551] test_cluster: fix DeprecationWarningTest tests fix the `DeprecationWarningTest` tests so they do not fail if other warnings are raised during the tests.
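The assertion pattern behind this change, as a minimal self-contained sketch (the test class and the warning messages here are illustrative only, not the driver's actual test code): record every warning raised inside the block, then scan all of them for the expected message instead of assuming it is the only one.

    import unittest
    import warnings


    class ToleratesUnrelatedWarnings(unittest.TestCase):
        def test_expected_warning_is_found(self):
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")  # record every warning raised in this block
                warnings.warn("unrelated noise from some dependency")  # defaults to UserWarning
                warnings.warn("Legacy execution parameters will be removed in 4.0.",
                              DeprecationWarning)
            # Scan all recorded warnings rather than asserting len(w) == 1 and
            # inspecting w[0]; unrelated warnings can then no longer fail the test.
            self.assertGreaterEqual(len(w), 1)
            self.assertTrue(any("Legacy execution parameters" in str(wa.message) for wa in w))


    if __name__ == "__main__":
        unittest.main()

Matching on a substring across all recorded warnings keeps the tests stable when dependencies emit their own DeprecationWarnings during the run.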
--- tests/integration/standard/test_cluster.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 7e9232edca..76d3031a6f 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -1573,9 +1573,11 @@ def test_deprecation_warnings_legacy_parameters(self): """ with warnings.catch_warnings(record=True) as w: TestCluster(load_balancing_policy=RoundRobinPolicy()) - self.assertEqual(len(w), 1) - self.assertIn("Legacy execution parameters will be removed in 4.0. Consider using execution profiles.", - str(w[0].message)) + logging.info(w) + self.assertGreaterEqual(len(w), 1) + self.assertTrue(any(["Legacy execution parameters will be removed in 4.0. " + "Consider using execution profiles." in + str(wa.message) for wa in w])) def test_deprecation_warnings_meta_refreshed(self): """ @@ -1591,11 +1593,11 @@ def test_deprecation_warnings_meta_refreshed(self): with warnings.catch_warnings(record=True) as w: cluster = TestCluster() cluster.set_meta_refresh_enabled(True) - self.assertEqual(len(w), 1) - self.assertIn("Cluster.set_meta_refresh_enabled is deprecated and will be removed in 4.0.", - str(w[0].message)) + logging.info(w) + self.assertGreaterEqual(len(w), 1) + self.assertTrue(any(["Cluster.set_meta_refresh_enabled is deprecated and will be removed in 4.0." in + str(wa.message) for wa in w])) - @unittest.expectedFailure def test_deprecation_warning_default_consistency_level(self): """ Tests the deprecation warning has been added when enabling @@ -1611,6 +1613,6 @@ def test_deprecation_warning_default_consistency_level(self): cluster = TestCluster() session = cluster.connect() session.default_consistency_level = ConsistencyLevel.ONE - self.assertEqual(len(w), 1) - self.assertIn("Setting the consistency level at the session level will be removed in 4.0", - str(w[0].message)) + self.assertGreaterEqual(len(w), 1) + self.assertTrue(any(["Setting the consistency level at the session level will be removed in 4.0" in + str(wa.message) for wa in w])) From 5b81e912ad8604b44a63ba3f151f6a9fe03e228a Mon Sep 17 00:00:00 2001 From: David Garcia Date: Wed, 9 Feb 2022 09:06:56 +0000 Subject: [PATCH 011/551] docs: update theme 1.1 --- .github/workflows/docs-pages.yaml | 35 +++++++++++ .github/workflows/docs-pages@v2.yaml | 33 ----------- .github/workflows/docs-pr.yaml | 30 ++++++++++ .github/workflows/docs-pr@v1.yaml | 28 --------- docs/Makefile | 8 +-- docs/_utils/setup.sh | 11 ---- docs/conf.py | 86 ++++++++++++---------------- docs/pyproject.toml | 25 ++++---- 8 files changed, 118 insertions(+), 138 deletions(-) create mode 100644 .github/workflows/docs-pages.yaml delete mode 100644 .github/workflows/docs-pages@v2.yaml create mode 100644 .github/workflows/docs-pr.yaml delete mode 100644 .github/workflows/docs-pr@v1.yaml delete mode 100755 docs/_utils/setup.sh diff --git a/.github/workflows/docs-pages.yaml b/.github/workflows/docs-pages.yaml new file mode 100644 index 0000000000..889affa11a --- /dev/null +++ b/.github/workflows/docs-pages.yaml @@ -0,0 +1,35 @@ +name: "Docs / Publish" + +on: + push: + branches: + - master + paths: + - "docs/**" + workflow_dispatch: + +jobs: + release: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + persist-credentials: false + fetch-depth: 0 + - name: Set up Python + uses: actions/setup-python@v2.3.2 + with: + python-version: 3.7 + - name: Setup Cassandra 
dependencies + run: sudo apt-get install gcc python-dev libev4 libev-dev + - name: Build driver + run: python setup.py develop + - name: Set up Poetry + run: curl -sSL https://install.python-poetry.org | python - + - name: Build docs + run: make -C docs multiversion + - name: Deploy docs to GitHub Pages + run: ./docs/_utils/deploy.sh + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/docs-pages@v2.yaml b/.github/workflows/docs-pages@v2.yaml deleted file mode 100644 index a5cd2f2390..0000000000 --- a/.github/workflows/docs-pages@v2.yaml +++ /dev/null @@ -1,33 +0,0 @@ -name: "Docs / Publish" - -on: - push: - branches: - - master - paths: - - "docs/**" - workflow_dispatch: - -jobs: - release: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v2 - with: - persist-credentials: false - fetch-depth: 0 - - name: Set up Python - uses: actions/setup-python@v1 - with: - python-version: 3.7 - - name: Setup Cassandra dependencies - run: sudo apt-get install gcc python-dev libev4 libev-dev - - name: Build driver - run: python setup.py develop - - name: Build docs - run: make -C docs multiversion - - name: Deploy docs to GitHub Pages - run: ./docs/_utils/deploy.sh - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml new file mode 100644 index 0000000000..e4d3366f79 --- /dev/null +++ b/.github/workflows/docs-pr.yaml @@ -0,0 +1,30 @@ +name: "Docs / Build PR" + +on: + pull_request: + branches: + - master + paths: + - "docs/**" + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + persist-credentials: false + fetch-depth: 0 + - name: Set up Python + uses: actions/setup-python@v2.3.2 + with: + python-version: 3.7 + - name: Setup Cassandra dependencies + run: sudo apt-get install gcc python-dev libev4 libev-dev + - name: Build driver + run: python setup.py develop + - name: Set up Poetry + run: curl -sSL https://install.python-poetry.org | python - + - name: Build docs + run: make -C docs test diff --git a/.github/workflows/docs-pr@v1.yaml b/.github/workflows/docs-pr@v1.yaml deleted file mode 100644 index 2cb972b840..0000000000 --- a/.github/workflows/docs-pr@v1.yaml +++ /dev/null @@ -1,28 +0,0 @@ -name: "Docs / Build PR" - -on: - pull_request: - branches: - - master - paths: - - "docs/**" - -jobs: - build: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v2 - with: - persist-credentials: false - fetch-depth: 0 - - name: Set up Python - uses: actions/setup-python@v1 - with: - python-version: 3.7 - - name: Setup Cassandra dependencies - run: sudo apt-get install gcc python-dev libev4 libev-dev - - name: Build driver - run: python setup.py develop - - name: Build docs - run: make -C docs test \ No newline at end of file diff --git a/docs/Makefile b/docs/Makefile index 0374c9de04..3423b9e723 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -1,5 +1,5 @@ # You can set these variables from the command line. -POETRY = $(HOME)/.poetry/bin/poetry +POETRY = $(HOME)/.local/bin/poetry SPHINXOPTS = SPHINXBUILD = $(POETRY) run sphinx-build PAPER = @@ -24,7 +24,8 @@ pristine: clean .PHONY: setup setup: - ./_utils/setup.sh + $(POETRY) install + $(POETRY) update .PHONY: clean clean: @@ -70,14 +71,13 @@ linkcheck: setup .PHONY: multiversion multiversion: setup - @mkdir -p $(HOME)/.cache/pypoetry/virtualenvs $(POETRY) run sphinx-multiversion $(SOURCEDIR) $(BUILDDIR)/dirhtml @echo @echo "Build finished. 
The HTML pages are in $(BUILDDIR)/dirhtml." .PHONY: multiversionpreview multiversionpreview: multiversion - $(POETRY) run python3 -m http.server 5500 --directory $(BUILDDIR)/dirhtml + $(POETRY) run python -m http.server 5500 --directory $(BUILDDIR)/dirhtml .PHONY: test test: setup diff --git a/docs/_utils/setup.sh b/docs/_utils/setup.sh deleted file mode 100755 index b8f50243e4..0000000000 --- a/docs/_utils/setup.sh +++ /dev/null @@ -1,11 +0,0 @@ -#! /bin/bash - -if pwd | egrep -q '\s'; then - echo "Working directory name contains one or more spaces." - exit 1 -fi - -which python3 || { echo "Failed to find python3. Try installing Python for your operative system: https://www.python.org/downloads/" && exit 1; } -which poetry || curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/1.1.3/get-poetry.py | python3 - && source ${HOME}/.poetry/env -poetry install -poetry update diff --git a/docs/conf.py b/docs/conf.py index 4fb79b1e3c..19ccdb621d 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,16 +1,14 @@ # -*- coding: utf-8 -*- +import cassandra import os import sys -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('..')) -import cassandra -import recommonmark -from recommonmark.transform import AutoStructify +import warnings +from datetime import date + from sphinx_scylladb_theme.utils import multiversion_regex_builder +sys.path.insert(0, os.path.abspath('..')) # -- General configuration ----------------------------------------------------- @@ -22,10 +20,7 @@ templates_path = ['_templates'] # The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# -source_suffix = ['.rst', '.md'] -autosectionlabel_prefix_document = True +source_suffix = [".rst", ".md"] # The encoding of source files. #source_encoding = 'utf-8-sig' @@ -37,7 +32,6 @@ project = u'Cassandra Driver' copyright = u'ScyllaDB 2021 and © DataStax 2013-2017' - # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. @@ -57,13 +51,36 @@ # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' -# Setup Sphinx -def setup(sphinx): - sphinx.add_config_value('recommonmark_config', { - 'enable_eval_rst': True, - 'enable_auto_toc_tree': False, - }, True) - sphinx.add_transform(AutoStructify) +# -- Options for not found extension ------------------------------------------- + +# Template used to render the 404.html generated by this extension. +notfound_template = '404.html' + +# Prefix added to all the URLs generated in the 404 page. 
+notfound_urls_prefix = '' + +# -- Options for redirect extension -------------------------------------------- + +# Read a YAML dictionary of redirections and generate an HTML file for each +redirects_file = "_utils/redirections.yaml" + +# -- Options for multiversion -------------------------------------------------- +# Whitelist pattern for tags (set to None to ignore all tags) +TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.1-scylla'] +smv_tag_whitelist = multiversion_regex_builder(TAGS) +# Whitelist pattern for branches (set to None to ignore all branches) +BRANCHES = ['master'] +smv_branch_whitelist = multiversion_regex_builder(BRANCHES) +# Defines which version is considered to be the latest stable version. +# Must be listed in smv_tag_whitelist or smv_branch_whitelist. +smv_latest_version = '3.25.1-scylla' +smv_rename_latest_version = 'stable' +# Whitelist pattern for remotes (set to None to use local branches only) +smv_remote_whitelist = r"^origin$" +# Pattern for released versions +smv_released_pattern = r'^tags/.*$' +# Format for versioned output directories inside the build directory +smv_outputdir_format = '{ref.name}' # -- Options for HTML output --------------------------------------------------- @@ -79,7 +96,6 @@ def setup(sphinx): 'github_repository': 'scylladb/python-driver', 'github_issues_repository': 'scylladb/python-driver', 'hide_edit_this_page_button': 'false', - 'hide_sidebar_index': 'false', 'hide_version_dropdown': ['master'], } @@ -98,33 +114,3 @@ def setup(sphinx): # Dictionary of values to pass into the template engine’s context for all pages html_context = {'html_baseurl': html_baseurl} -# -- Options for not found extension ------------------------------------------- - -# Template used to render the 404.html generated by this extension. -notfound_template = '404.html' - -# Prefix added to all the URLs generated in the 404 page. -notfound_urls_prefix = '' - -# -- Options for redirect extension -------------------------------------------- - -# Read a YAML dictionary of redirections and generate an HTML file for each -redirects_file = "_utils/redirections.yaml" - -# -- Options for multiversion -------------------------------------------------- -# Whitelist pattern for tags (set to None to ignore all tags) -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.1-scylla'] -smv_tag_whitelist = multiversion_regex_builder(TAGS) -# Whitelist pattern for branches (set to None to ignore all branches) -BRANCHES = ['master'] -smv_branch_whitelist = multiversion_regex_builder(BRANCHES) -# Defines which version is considered to be the latest stable version. -# Must be listed in smv_tag_whitelist or smv_branch_whitelist. 
-smv_latest_version = '3.25.1-scylla' -smv_rename_latest_version = 'stable' -# Whitelist pattern for remotes (set to None to use local branches only) -smv_remote_whitelist = r"^origin$" -# Pattern for released versions -smv_released_pattern = r'^tags/.*$' -# Format for versioned output directories inside the build directory -smv_outputdir_format = '{ref.name}' diff --git a/docs/pyproject.toml b/docs/pyproject.toml index 0c40a9e464..359b7950ed 100644 --- a/docs/pyproject.toml +++ b/docs/pyproject.toml @@ -5,21 +5,22 @@ description = "ScyllaDB Python Driver Docs" authors = ["Python Driver Contributors"] [tool.poetry.dependencies] -python = "^3.7" -geomet = "0.1.2" -six = "1.15.0" -futures = "2.2.0" eventlet = "0.25.2" +futures = "2.2.0" +geomet = "0.1.2" gevent = "20.12.1" -scales = "1.0.9" -[tool.poetry.dev-dependencies] -sphinx-autobuild = "0.7.1" -Sphinx = "2.4.4" -jinja2 = "2.8.1" gremlinpython = "3.4.7" -recommonmark = "0.5.0" -sphinx-scylladb-theme = "~1.0.0" -sphinx-multiversion-scylla = "~0.2.6" +python = "^3.7" +pyyaml = "^6.0" +pygments = "2.2.0" +recommonmark = "^0.7.1" +sphinx-autobuild = "^2021.3.14" +sphinx-sitemap = "2.1.0" +sphinx-scylladb-theme = "~1.1.0" +sphinx-multiversion-scylla = "~0.2.10" +Sphinx = "^4.3.2" +scales = "1.0.9" +six = "1.15.0" [build-system] requires = ["poetry>=0.12"] From 5d7d88d4bd4ecd0bd06ae6cdc71eff605ecab7b4 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Wed, 9 Feb 2022 11:47:14 +0000 Subject: [PATCH 012/551] Fix warning --- docs/conf.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 19ccdb621d..db71285cea 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,14 +1,12 @@ # -*- coding: utf-8 -*- - -import cassandra import os import sys -import warnings from datetime import date from sphinx_scylladb_theme.utils import multiversion_regex_builder sys.path.insert(0, os.path.abspath('..')) +import cassandra # -- General configuration ----------------------------------------------------- From 6d6e19e74d2f8cecec7bdd8420fc218091edf5f6 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Tue, 8 Mar 2022 11:25:13 +0200 Subject: [PATCH 013/551] Fix graph docs warnings --- docs/api/cassandra/datastax/graph/index.rst | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/api/cassandra/datastax/graph/index.rst b/docs/api/cassandra/datastax/graph/index.rst index dafd5f65fd..a9b41cbdc2 100644 --- a/docs/api/cassandra/datastax/graph/index.rst +++ b/docs/api/cassandra/datastax/graph/index.rst @@ -37,8 +37,10 @@ .. autoclass:: GraphProtocol :members: + :noindex: .. autoclass:: GraphOptions + :noindex: .. autoattribute:: graph_name @@ -65,29 +67,38 @@ .. autoclass:: SimpleGraphStatement :members: + :noindex: .. autoclass:: Result :members: + :noindex: .. autoclass:: Vertex :members: + :noindex: .. autoclass:: VertexProperty :members: + :noindex: .. autoclass:: Edge :members: + :noindex: .. autoclass:: Path :members: + :noindex: .. autoclass:: T :members: + :noindex: .. autoclass:: GraphSON1Serializer :members: + :noindex: .. autoclass:: GraphSON1Deserializer + :noindex: .. automethod:: deserialize_date @@ -119,3 +130,4 @@ .. 
autoclass:: GraphSON2Reader
    :members:
+    :noindex:

From 6eaafc3f465e9dfbbb63959b41683cff93a9771f Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Thu, 2 Jul 2020 23:17:06 +0300
Subject: [PATCH 014/551] shard aware: shard aware unique port (advanced shard
 aware)

The shard aware port is now advertised in the OPTIONS message, and we need
to replace the connection with the new host/port.

* fixing tests to match the advanced shard awareness

Now that we could have two hosts listed (one with port 9042, and one with
port 19042), we need to make the test a bit less prone to failure because
of that change.
---
 cassandra/c_shard_info.pyx                    | 13 +++-
 cassandra/cluster.py                          | 17 +++--
 cassandra/connection.py                       | 33 ++++++++-
 cassandra/metadata.py                         |  4 +-
 cassandra/pool.py                             | 43 +++++++++++-
 cassandra/shard_info.py                       |  9 ++-
 docs/scylla_specific.rst                      |  5 +-
 tests/integration/standard/test_cluster.py    |  2 +-
 .../integration/standard/test_shard_aware.py  | 17 +++--
 tests/unit/io/utils.py                        |  4 +-
 tests/unit/test_cluster.py                    |  3 +-
 tests/unit/test_control_connection.py         |  5 ++
 tests/unit/test_host_connection_pool.py       |  3 +-
 tests/unit/test_shard_aware.py                | 70 ++++++++++++++++++-
 14 files changed, 196 insertions(+), 32 deletions(-)

diff --git a/cassandra/c_shard_info.pyx b/cassandra/c_shard_info.pyx
index 012bfe172b..a1aa42911a 100644
--- a/cassandra/c_shard_info.pyx
+++ b/cassandra/c_shard_info.pyx
@@ -22,15 +22,19 @@ cdef class ShardingInfo():
     cdef readonly str partitioner
     cdef readonly str sharding_algorithm
     cdef readonly int sharding_ignore_msb
+    cdef readonly int shard_aware_port
+    cdef readonly int shard_aware_port_ssl
     cdef object __weakref__

-    def __init__(self, shard_id, shards_count, partitioner, sharding_algorithm, sharding_ignore_msb):
+    def __init__(self, shard_id, shards_count, partitioner, sharding_algorithm, sharding_ignore_msb, shard_aware_port,
+                 shard_aware_port_ssl):
         self.shards_count = int(shards_count)
         self.partitioner = partitioner
         self.sharding_algorithm = sharding_algorithm
         self.sharding_ignore_msb = int(sharding_ignore_msb)
-
+        self.shard_aware_port = int(shard_aware_port) if shard_aware_port else 0
+        self.shard_aware_port_ssl = int(shard_aware_port_ssl) if shard_aware_port_ssl else 0

     @staticmethod
     def parse_sharding_info(message):
@@ -39,12 +43,15 @@ cdef class ShardingInfo():
         partitioner = message.options.get('SCYLLA_PARTITIONER', [''])[0] or None
         sharding_algorithm = message.options.get('SCYLLA_SHARDING_ALGORITHM', [''])[0] or None
         sharding_ignore_msb = message.options.get('SCYLLA_SHARDING_IGNORE_MSB', [''])[0] or None
+        shard_aware_port = message.options.get('SCYLLA_SHARD_AWARE_PORT', [''])[0] or None
+        shard_aware_port_ssl = message.options.get('SCYLLA_SHARD_AWARE_PORT_SSL', [''])[0] or None

         if not (shard_id or shards_count or partitioner == "org.apache.cassandra.dht.Murmur3Partitioner" or
                 sharding_algorithm == "biased-token-round-robin" or sharding_ignore_msb):
             return 0, None

-        return int(shard_id), ShardingInfo(shard_id, shards_count, partitioner, sharding_algorithm, sharding_ignore_msb)
+        return int(shard_id), ShardingInfo(shard_id, shards_count, partitioner, sharding_algorithm, sharding_ignore_msb,
+                                           shard_aware_port, shard_aware_port_ssl)


     def shard_id_from_token(self, int64_t token_input):
diff --git a/cassandra/cluster.py b/cassandra/cluster.py
index ca5e2c9ed6..92ec95cb26 100644
--- a/cassandra/cluster.py
+++ b/cassandra/cluster.py
@@ -1734,14 +1734,20 @@ def get_connection_holders(self):
         holders.append(self.control_connection)
         return holders

+    def get_all_pools(self):
+        pools = []
+        for s in 
tuple(self.sessions): + pools.extend(s.get_pools()) + return pools + def is_shard_aware(self): - return bool(self.get_connection_holders()[:-1][0].host.sharding_info) + return bool(self.get_all_pools()[0].host.sharding_info) def shard_aware_stats(self): if self.is_shard_aware(): return {str(pool.host.endpoint): {'shards_count': pool.host.sharding_info.shards_count, 'connected': len(pool._connections.keys())} - for pool in self.get_connection_holders()[:-1]} + for pool in self.get_all_pools()} def shutdown(self): """ @@ -3756,7 +3762,7 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, partitioner = local_row.get("partitioner") tokens = local_row.get("tokens") - host = self._cluster.metadata.get_host(connection.endpoint) + host = self._cluster.metadata.get_host(connection.original_endpoint) if host: datacenter = local_row.get("data_center") rack = local_row.get("rack") @@ -4049,9 +4055,8 @@ def _get_peers_query(self, peers_query_type, connection=None): query_template = (self._SELECT_SCHEMA_PEERS_TEMPLATE if peers_query_type == self.PeersQueryType.PEERS_SCHEMA else self._SELECT_PEERS_NO_TOKENS_TEMPLATE) - - host_release_version = self._cluster.metadata.get_host(connection.endpoint).release_version - host_dse_version = self._cluster.metadata.get_host(connection.endpoint).dse_version + host_release_version = self._cluster.metadata.get_host(connection.original_endpoint).release_version + host_dse_version = self._cluster.metadata.get_host(connection.original_endpoint).dse_version uses_native_address_query = ( host_dse_version and Version(host_dse_version) >= self._MINIMUM_NATIVE_ADDRESS_DSE_VERSION) diff --git a/cassandra/connection.py b/cassandra/connection.py index 8218b00117..c48a4deac8 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -28,7 +28,8 @@ import time import ssl import weakref - +import random +import itertools if 'gevent.monkey' in sys.modules: from gevent.queue import Queue, Empty @@ -116,6 +117,10 @@ def decompress(byts): HEADER_DIRECTION_TO_CLIENT = 0x80 HEADER_DIRECTION_MASK = 0x80 +# shard aware default for opening per shard connection +DEFAULT_LOCAL_PORT_LOW = 49152 +DEFAULT_LOCAL_PORT_HIGH = 65535 + frame_header_v1_v2 = struct.Struct('>BbBi') frame_header_v3 = struct.Struct('>BhBi') @@ -666,6 +671,17 @@ def reset_cql_frame_buffer(self): self.reset_io_buffer() +class ShardawarePortGenerator: + @classmethod + def generate(cls, shard_id, total_shards): + start = random.randrange(DEFAULT_LOCAL_PORT_LOW, DEFAULT_LOCAL_PORT_HIGH) + available_ports = itertools.chain(range(start, DEFAULT_LOCAL_PORT_HIGH), range(DEFAULT_LOCAL_PORT_LOW, start)) + + for port in available_ports: + if port % total_shards == shard_id: + yield port + + class Connection(object): CALLBACK_ERR_THREAD_THRESHOLD = 100 @@ -762,7 +778,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, ssl_options=None, sockopts=None, compression=True, cql_version=None, protocol_version=ProtocolVersion.MAX_SUPPORTED, is_control_connection=False, user_type_map=None, connect_timeout=None, allow_beta_protocol_version=False, no_compact=False, - ssl_context=None, owning_pool=None): + ssl_context=None, owning_pool=None, shard_id=None, total_shards=None): # TODO next major rename host to endpoint and remove port kwarg. 
self.endpoint = host if isinstance(host, EndPoint) else DefaultEndPoint(host, port) @@ -812,6 +828,9 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None, self.lock = RLock() self.connected_event = Event() + self.shard_id = shard_id + self.total_shards = total_shards + self.original_endpoint = self.endpoint @property def host(self): @@ -874,6 +893,15 @@ def _wrap_socket_from_context(self): self._socket = self.ssl_context.wrap_socket(self._socket, **ssl_options) def _initiate_connection(self, sockaddr): + if self.shard_id is not None: + for port in ShardawarePortGenerator.generate(self.shard_id, self.total_shards): + try: + self._socket.bind(('', port)) + break + except Exception as ex: + log.debug("port=%d couldn't bind cause: %s", port, str(ex)) + log.debug(f'connection (%r) port=%d should be shard_id=%d', id(self), port, port % self.total_shards) + self._socket.connect(sockaddr) def _match_hostname(self): @@ -894,6 +922,7 @@ def _get_socket_addresses(self): def _connect_socket(self): sockerr = None addresses = self._get_socket_addresses() + port = None for (af, socktype, proto, _, sockaddr) in addresses: try: self._socket = self._socket_impl.socket(af, socktype, proto) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 83beb6190c..131900b323 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -134,8 +134,8 @@ def export_schema_as_string(self): def refresh(self, connection, timeout, target_type=None, change_type=None, **kwargs): - server_version = self.get_host(connection.endpoint).release_version - dse_version = self.get_host(connection.endpoint).dse_version + server_version = self.get_host(connection.original_endpoint).release_version + dse_version = self.get_host(connection.original_endpoint).dse_version parser = get_schema_parser(connection, server_version, dse_version, timeout) if not target_type: diff --git a/cassandra/pool.py b/cassandra/pool.py index 87b66dd85b..01b466a363 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -21,6 +21,7 @@ import socket import time import random +import copy from threading import Lock, RLock, Condition import weakref try: @@ -412,6 +413,7 @@ def __init__(self, host, host_distance, session): # so that we can dispose of them. 
self._trash = set() self._shard_connections_futures = [] + self.advanced_shardaware_block_until = 0 if host_distance == HostDistance.IGNORED: log.debug("Not opening connection to ignored host %s", self.host) @@ -431,7 +433,7 @@ def __init__(self, host, host_distance, session): if first_connection.sharding_info: self.host.sharding_info = first_connection.sharding_info - self._open_connections_for_all_shards() + self._open_connections_for_all_shards(first_connection.shard_id) log.debug("Finished initializing connection for host %s", self.host) @@ -645,6 +647,24 @@ def _close_excess_connections(self): log.debug("Closing excess connection (%s) to %s", id(c), self.host) c.close() + def disable_advanced_shard_aware(self, secs): + log.warning("disabling advanced_shard_aware for %i seconds, could be that this client is behind NAT?", secs) + self.advanced_shardaware_block_until = max(time.time() + secs, self.advanced_shardaware_block_until) + + def _get_shard_aware_endpoint(self): + if self.advanced_shardaware_block_until and self.advanced_shardaware_block_until < time.time(): + return None + + endpoint = None + if self._session.cluster.ssl_options and self.host.sharding_info.shard_aware_port_ssl: + endpoint = copy.copy(self.host.endpoint) + endpoint._port = self.host.sharding_info.shard_aware_port_ssl + elif self.host.sharding_info.shard_aware_port: + endpoint = copy.copy(self.host.endpoint) + endpoint._port = self.host.sharding_info.shard_aware_port + + return endpoint + def _open_connection_to_missing_shard(self, shard_id): """ Creates a new connection, checks its shard_id and populates our shard @@ -666,13 +686,28 @@ def _open_connection_to_missing_shard(self, shard_id): with self._lock: if self.is_shutdown: return + shard_aware_endpoint = self._get_shard_aware_endpoint() + log.debug("shard_aware_endpoint=%r", shard_aware_endpoint) + + if shard_aware_endpoint: + conn = self._session.cluster.connection_factory(shard_aware_endpoint, owning_pool=self, + shard_id=shard_id, + total_shards=self.host.sharding_info.shards_count) + conn.original_endpoint = self.host.endpoint + else: + conn = self._session.cluster.connection_factory(self.host.endpoint, owning_pool=self) - conn = self._session.cluster.connection_factory(self.host.endpoint) log.debug("Received a connection %s for shard_id=%i on host %s", id(conn), conn.shard_id, self.host) if self.is_shutdown: log.debug("Pool for host %s is in shutdown, closing the new connection (%s)", self.host, id(conn)) conn.close() return + + if shard_aware_endpoint and shard_id != conn.shard_id: + # connection didn't land on expected shared + # assuming behind a NAT, disabling advanced shard aware for a while + self.disable_advanced_shard_aware(10 * 60) + old_conn = self._connections.get(conn.shard_id) if old_conn is None or old_conn.orphaned_threshold_reached: log.debug( @@ -776,7 +811,7 @@ def _open_connection_to_missing_shard(self, shard_id): conn.close() self._connecting.discard(shard_id) - def _open_connections_for_all_shards(self): + def _open_connections_for_all_shards(self, skip_shard_id=None): """ Loop over all the shards and try to open a connection to each one. 
""" @@ -785,6 +820,8 @@ def _open_connections_for_all_shards(self): return for shard_id in range(self.host.sharding_info.shards_count): + if skip_shard_id and skip_shard_id == shard_id: + continue future = self._session.submit(self._open_connection_to_missing_shard, shard_id) if isinstance(future, Future): self._connecting.add(shard_id) diff --git a/cassandra/shard_info.py b/cassandra/shard_info.py index 6bd56fa796..a37b8467b5 100644 --- a/cassandra/shard_info.py +++ b/cassandra/shard_info.py @@ -20,11 +20,13 @@ class _ShardingInfo(object): - def __init__(self, shard_id, shards_count, partitioner, sharding_algorithm, sharding_ignore_msb): + def __init__(self, shard_id, shards_count, partitioner, sharding_algorithm, sharding_ignore_msb, shard_aware_port, shard_aware_port_ssl): self.shards_count = int(shards_count) self.partitioner = partitioner self.sharding_algorithm = sharding_algorithm self.sharding_ignore_msb = int(sharding_ignore_msb) + self.shard_aware_port = int(shard_aware_port) if shard_aware_port else None + self.shard_aware_port_ssl = int(shard_aware_port_ssl) if shard_aware_port_ssl else None @staticmethod def parse_sharding_info(message): @@ -33,13 +35,16 @@ def parse_sharding_info(message): partitioner = message.options.get('SCYLLA_PARTITIONER', [''])[0] or None sharding_algorithm = message.options.get('SCYLLA_SHARDING_ALGORITHM', [''])[0] or None sharding_ignore_msb = message.options.get('SCYLLA_SHARDING_IGNORE_MSB', [''])[0] or None + shard_aware_port = message.options.get('SCYLLA_SHARD_AWARE_PORT', [''])[0] or None + shard_aware_port_ssl = message.options.get('SCYLLA_SHARD_AWARE_PORT_SSL', [''])[0] or None log.debug("Parsing sharding info from message options %s", message.options) if not (shard_id or shards_count or partitioner == "org.apache.cassandra.dht.Murmur3Partitioner" or sharding_algorithm == "biased-token-round-robin" or sharding_ignore_msb): return 0, None - return int(shard_id), _ShardingInfo(shard_id, shards_count, partitioner, sharding_algorithm, sharding_ignore_msb) + return int(shard_id), _ShardingInfo(shard_id, shards_count, partitioner, sharding_algorithm, sharding_ignore_msb, + shard_aware_port, shard_aware_port_ssl) def shard_id_from_token(self, token): """ diff --git a/docs/scylla_specific.rst b/docs/scylla_specific.rst index 366628e59b..fec6e50c88 100644 --- a/docs/scylla_specific.rst +++ b/docs/scylla_specific.rst @@ -8,10 +8,13 @@ Shard Awareness As a result, latency is significantly reduced because there is no need to pass data between the shards. Details on the scylla cql protocol extensions -https://github.com/scylladb/scylla/blob/master/docs/design-notes/protocol-extensions.md +https://github.com/scylladb/scylla/blob/master/docs/design-notes/protocol-extensions.md#intranode-sharding For using it you only need to enable ``TokenAwarePolicy`` on the ``Cluster`` +See the configuration of ``native_shard_aware_transport_port`` and ``native_shard_aware_transport_port_ssl`` on scylla.yaml: +https://github.com/scylladb/scylla/blob/master/docs/design-notes/protocols.md#cql-client-protocol + .. 
code:: python from cassandra.cluster import Cluster diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 76d3031a6f..f69ab6f57f 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -1514,7 +1514,7 @@ def test_prepare_on_ignored_hosts(self): # the length of mock_calls will vary, but all should use the unignored # address for c in cluster.connection_factory.mock_calls: - self.assertEqual(call(DefaultEndPoint(unignored_address)), c) + self.assertEqual(unignored_address, c.args[0].address) cluster.shutdown() diff --git a/tests/integration/standard/test_shard_aware.py b/tests/integration/standard/test_shard_aware.py index 8884ac8e46..ef2348d1b2 100644 --- a/tests/integration/standard/test_shard_aware.py +++ b/tests/integration/standard/test_shard_aware.py @@ -15,6 +15,7 @@ import time import random from subprocess import run +import logging try: from concurrent.futures import ThreadPoolExecutor, as_completed @@ -28,10 +29,12 @@ from cassandra.cluster import Cluster from cassandra.policies import TokenAwarePolicy, RoundRobinPolicy, ConstantReconnectionPolicy -from cassandra import OperationTimedOut +from cassandra import OperationTimedOut, ConsistencyLevel from tests.integration import use_cluster, get_node, PROTOCOL_VERSION +LOGGER = logging.getLogger(__name__) + def setup_module(): os.environ['SCYLLA_EXT_OPTS'] = "--smp 4 --memory 2048M" @@ -41,12 +44,12 @@ def setup_module(): class TestShardAwareIntegration(unittest.TestCase): @classmethod def setup_class(cls): - cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), + cls.cluster = Cluster(contact_points=["127.0.0.1"], protocol_version=PROTOCOL_VERSION, + load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), reconnection_policy=ConstantReconnectionPolicy(1)) cls.session = cls.cluster.connect() - - print(cls.cluster.is_shard_aware()) - print(cls.cluster.shard_aware_stats()) + LOGGER.info(cls.cluster.is_shard_aware()) + LOGGER.info(cls.cluster.shard_aware_stats()) @classmethod def teardown_class(cls): @@ -56,7 +59,7 @@ def verify_same_shard_in_tracing(self, results, shard_name): traces = results.get_query_trace() events = traces.events for event in events: - print(event.thread_name, event.description) + LOGGER.info("%s %s", event.thread_name, event.description) for event in events: self.assertEqual(event.thread_name, shard_name) self.assertIn('querying locally', "\n".join([event.description for event in events])) @@ -65,7 +68,7 @@ def verify_same_shard_in_tracing(self, results, shard_name): traces = self.session.execute("SELECT * FROM system_traces.events WHERE session_id = %s", (trace_id,)) events = [event for event in traces] for event in events: - print(event.thread, event.activity) + LOGGER.info("%s %s", event.thread, event.activity) for event in events: self.assertEqual(event.thread, shard_name) self.assertIn('querying locally', "\n".join([event.activity for event in events])) diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 848513f031..ac8b8196db 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -28,7 +28,7 @@ from itertools import cycle import six from six import binary_type, BytesIO -from mock import Mock +from mock import Mock, MagicMock import errno import logging @@ -214,7 +214,7 @@ def make_header_prefix(self, message_class, version=2, stream_id=0): def make_connection(self): c = 
self.connection_class(DefaultEndPoint('1.2.3.4'), cql_version='3.0.1', connect_timeout=5) - mocket = Mock() + mocket = MagicMock() mocket.send.side_effect = lambda x: len(x) self.set_socket(c, mocket) return c diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 620f642084..2c9ebd3872 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -155,6 +155,7 @@ def test_default_serial_consistency_level_ep(self, *_): """ c = Cluster(protocol_version=4) s = Session(c, [Host("127.0.0.1", SimpleConvictionPolicy)]) + c.connection_class.initialize_reactor() # default is None default_profile = c.profile_manager.default @@ -183,7 +184,7 @@ def test_default_serial_consistency_level_legacy(self, *_): """ c = Cluster(protocol_version=4) s = Session(c, [Host("127.0.0.1", SimpleConvictionPolicy)]) - + c.connection_class.initialize_reactor() # default is None self.assertIsNone(s.default_serial_consistency_level) diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index efad1ca5c9..cb4d9c8ada 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -84,6 +84,7 @@ def __init__(self): self.executor = Mock(spec=ThreadPoolExecutor) self.profile_manager.profiles[EXEC_PROFILE_DEFAULT] = ExecutionProfile(RoundRobinPolicy()) self.endpoint_factory = DefaultEndPointFactory().configure(self) + self.ssl_options = None def add_host(self, endpoint, datacenter, rack, signal=False, refresh_nodes=True): host = Host(endpoint, SimpleConvictionPolicy, datacenter, rack) @@ -99,6 +100,9 @@ def on_up(self, host): def on_down(self, host, is_host_addition): self.down_host = host + def get_control_connection_host(self): + return self.added_hosts[0] if self.added_hosts else None + def _node_meta_results(local_results, peer_results): """ @@ -121,6 +125,7 @@ class MockConnection(object): def __init__(self): self.endpoint = DefaultEndPoint("192.168.1.0") + self.original_endpoint = self.endpoint self.local_results = [ ["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"], [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]] diff --git a/tests/unit/test_host_connection_pool.py b/tests/unit/test_host_connection_pool.py index 6a82a05fe0..67cf42559d 100644 --- a/tests/unit/test_host_connection_pool.py +++ b/tests/unit/test_host_connection_pool.py @@ -301,7 +301,8 @@ def mock_connection_factory(self, *args, **kwargs): connection.shard_id = self.connection_counter self.connection_counter += 1 connection.sharding_info = _ShardingInfo(shard_id=1, shards_count=14, - partitioner="", sharding_algorithm="", sharding_ignore_msb=0) + partitioner="", sharding_algorithm="", sharding_ignore_msb=0, + shard_aware_port="", shard_aware_port_ssl="") return connection diff --git a/tests/unit/test_shard_aware.py b/tests/unit/test_shard_aware.py index 2d049f28fd..81bee1d8a8 100644 --- a/tests/unit/test_shard_aware.py +++ b/tests/unit/test_shard_aware.py @@ -17,9 +17,16 @@ except ImportError: import unittest # noqa -from cassandra.connection import ShardingInfo +import logging +from unittest.mock import MagicMock +from futures.thread import ThreadPoolExecutor + +from cassandra.pool import HostConnection, HostDistance +from cassandra.connection import ShardingInfo, DefaultEndPoint from cassandra.metadata import Murmur3Token +LOGGER = logging.getLogger(__name__) + class TestShardAware(unittest.TestCase): def test_parsing_and_calculating_shard_id(self): @@ -43,3 
+50,64 @@ class OptionsHolder(object):
         self.assertEqual(shard_info.shard_id_from_token(Murmur3Token.from_key(b"c").value), 6)
         self.assertEqual(shard_info.shard_id_from_token(Murmur3Token.from_key(b"e").value), 4)
         self.assertEqual(shard_info.shard_id_from_token(Murmur3Token.from_key(b"100000").value), 2)
+
+    def test_advanced_shard_aware_port(self):
+        """
+        Test that, given a `shard_aware_port` in the OPTIONS message (ShardInfo class),
+        the next connections would be opened using this port
+        """
+        class MockSession(MagicMock):
+            is_shutdown = False
+            keyspace = "ks1"
+
+            def __init__(self, is_ssl=False, *args, **kwargs):
+                super().__init__(*args, **kwargs)
+                self.cluster = MagicMock()
+                if is_ssl:
+                    self.cluster.ssl_options = {'some_ssl_options': True}
+                else:
+                    self.cluster.ssl_options = None
+                self.cluster.executor = ThreadPoolExecutor(max_workers=2)
+                self.cluster.signal_connection_failure = lambda *args, **kwargs: False
+                self.cluster.connection_factory = self.mock_connection_factory
+                self.connection_counter = -1
+                self.futures = []
+
+            def submit(self, fn, *args, **kwargs):
+                logging.info("Scheduling %s with args: %s, kwargs: %s", fn, args, kwargs)
+                if not self.is_shutdown:
+                    f = self.cluster.executor.submit(fn, *args, **kwargs)
+                    self.futures += [f]
+                    return f
+
+            def mock_connection_factory(self, *args, **kwargs):
+                connection = MagicMock()
+                connection.is_shutdown = False
+                connection.is_defunct = False
+                connection.is_closed = False
+                connection.orphaned_threshold_reached = False
+                connection.endpoint = args[0]
+                connection.shard_id = kwargs.get('shard_id', 0)
+                connection.sharding_info = ShardingInfo(shard_id=1, shards_count=4,
+                                                        partitioner="", sharding_algorithm="", sharding_ignore_msb=0,
+                                                        shard_aware_port=19042, shard_aware_port_ssl=19045)
+
+                return connection
+
+        host = MagicMock()
+        host.endpoint = DefaultEndPoint("1.2.3.4")
+
+        for port, is_ssl in [(19042, False), (19045, True)]:
+            session = MockSession(is_ssl=is_ssl)
+            pool = HostConnection(host=host, host_distance=HostDistance.REMOTE, session=session)
+            for f in session.futures:
+                f.result()
+            assert len(pool._connections) == 4
+            for shard_id, connection in pool._connections.items():
+                assert connection.shard_id == shard_id
+                if shard_id == 0:
+                    assert connection.endpoint == DefaultEndPoint("1.2.3.4")
+                else:
+                    assert connection.endpoint == DefaultEndPoint("1.2.3.4", port=port)
+
+        session.cluster.executor.shutdown(wait=True)

From 02117bc470eca5507066a0cf1735f92f16e5f5f6 Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Mon, 7 Mar 2022 10:15:45 +0200
Subject: [PATCH 015/551] shard_aware: adding `shard_aware_options` to Cluster
 options

In some cases users don't want the automatic opening of so many
connections (num of shards * num of nodes); this adds a new Cluster
parameter that can disable shard awareness:

```python
cluster = Cluster(contact_points=["127.0.0.1"],
                  shard_aware_options=dict(disable=True),
                  load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()))
```
---
 cassandra/cluster.py                    | 24 +++++++++++++++++++++++-
 cassandra/pool.py                       | 12 ++++++------
 docs/scylla_specific.rst                | 19 +++++++++++++++++++
 tests/unit/test_control_connection.py   |  3 ---
 tests/unit/test_host_connection_pool.py |  3 ++-
 tests/unit/test_shard_aware.py          |  7 +++++--
 6 files changed, 55 insertions(+), 13 deletions(-)

diff --git a/cassandra/cluster.py b/cassandra/cluster.py
index 92ec95cb26..6894f1a6c0 100644
--- a/cassandra/cluster.py
+++ b/cassandra/cluster.py
@@ -553,6 +553,20 @@ def default(self):
     """
 
+class ShardAwareOptions:
+    disable = 
None + disable_shardaware_port = False + + def __init__(self, opts=None, disable=None, disable_shardaware_port=None): + self.disable = disable + self.disable_shardaware_port = disable_shardaware_port + if opts: + if isinstance(opts, ShardAwareOptions): + self.__dict__.update(opts.__dict__) + elif isinstance(opts, dict): + self.__dict__.update(opts) + + class _ConfigMode(object): UNCOMMITTED = 0 LEGACY = 1 @@ -1003,6 +1017,12 @@ def default_retry_policy(self, policy): load the configuration and certificates. """ + shard_aware_options = None + """ + Can be set with :class:`ShardAwareOptions` or with a dict, to disable the automatic shardaware, + or to disable the shardaware port (advanced shardaware) + """ + @property def schema_metadata_enabled(self): """ @@ -1104,7 +1124,8 @@ def __init__(self, monitor_reporting_enabled=True, monitor_reporting_interval=30, client_id=None, - cloud=None): + cloud=None, + shard_aware_options=None): """ ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as extablishing connection pools or refreshing metadata. @@ -1304,6 +1325,7 @@ def __init__(self, self.reprepare_on_up = reprepare_on_up self.monitor_reporting_enabled = monitor_reporting_enabled self.monitor_reporting_interval = monitor_reporting_interval + self.shard_aware_options = ShardAwareOptions(opts=shard_aware_options) self._listeners = set() self._listener_lock = Lock() diff --git a/cassandra/pool.py b/cassandra/pool.py index 01b466a363..3a80054c63 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -430,8 +430,7 @@ def __init__(self, host, host_distance, session): if self._keyspace: first_connection.set_keyspace_blocking(self._keyspace) - - if first_connection.sharding_info: + if first_connection.sharding_info and not self._session.cluster.shard_aware_options.disable: self.host.sharding_info = first_connection.sharding_info self._open_connections_for_all_shards(first_connection.shard_id) @@ -446,7 +445,7 @@ def _get_connection_for_routing_key(self, routing_key=None): raise NoConnectionsAvailable() shard_id = None - if self.host.sharding_info and routing_key: + if not self._session.cluster.shard_aware_options.disable and self.host.sharding_info and routing_key: t = self._session.cluster.metadata.token_map.token_class.from_key(routing_key) shard_id = self.host.sharding_info.shard_id_from_token(t.value) @@ -585,7 +584,7 @@ def _replace(self, connection): try: if connection.shard_id in self._connections.keys(): del self._connections[connection.shard_id] - if self.host.sharding_info: + if self.host.sharding_info and not self._session.cluster.shard_aware_options.disable: self._connecting.add(connection.shard_id) self._session.submit(self._open_connection_to_missing_shard, connection.shard_id) else: @@ -652,7 +651,8 @@ def disable_advanced_shard_aware(self, secs): self.advanced_shardaware_block_until = max(time.time() + secs, self.advanced_shardaware_block_until) def _get_shard_aware_endpoint(self): - if self.advanced_shardaware_block_until and self.advanced_shardaware_block_until < time.time(): + if (self.advanced_shardaware_block_until and self.advanced_shardaware_block_until < time.time()) or \ + self._session.cluster.shard_aware_options.disable_shardaware_port: return None endpoint = None @@ -820,7 +820,7 @@ def _open_connections_for_all_shards(self, skip_shard_id=None): return for shard_id in range(self.host.sharding_info.shards_count): - if skip_shard_id and skip_shard_id == shard_id: + if skip_shard_id is not None and skip_shard_id == shard_id: 
continue
             future = self._session.submit(self._open_connection_to_missing_shard, shard_id)
             if isinstance(future, Future):
diff --git a/docs/scylla_specific.rst b/docs/scylla_specific.rst
index fec6e50c88..24e2182dc6 100644
--- a/docs/scylla_specific.rst
+++ b/docs/scylla_specific.rst
@@ -26,6 +26,25 @@ https://github.com/scylladb/scylla/blob/master/docs/design-notes/protocols.md#cq
 New Cluster Helpers
 -------------------
 
+* ``shard_aware_options``
+
+  Setting it to ``dict(disable=True)`` would disable the shard aware functionality, for cases favoring one connection per host (for example, lots of processes connecting from one client host, generating a big load of connections).
+
+  Another option is to configure scylla by setting ``enable_shard_aware_drivers: false`` in scylla.yaml.
+
+.. code:: python
+
+    from cassandra.cluster import Cluster
+
+    cluster = Cluster(shard_aware_options=dict(disable=True))
+    session = cluster.connect()
+
+    assert not cluster.is_shard_aware(), "Shard aware should be disabled"
+
+    # or just disable the shard aware port logic
+    cluster = Cluster(shard_aware_options=dict(disable_shardaware_port=True))
+    session = cluster.connect()
+
 * ``cluster.is_shard_aware()``
 
   New method available on ``Cluster`` allowing to check whether the remote cluster supports shard awareness (bool)
diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py
index cb4d9c8ada..9ced92c2c6 100644
--- a/tests/unit/test_control_connection.py
+++ b/tests/unit/test_control_connection.py
@@ -100,9 +100,6 @@ def on_up(self, host):
     def on_down(self, host, is_host_addition):
         self.down_host = host
 
-    def get_control_connection_host(self):
-        return self.added_hosts[0] if self.added_hosts else None
-
 
 def _node_meta_results(local_results, peer_results):
     """
diff --git a/tests/unit/test_host_connection_pool.py b/tests/unit/test_host_connection_pool.py
index 67cf42559d..8c51758cd9 100644
--- a/tests/unit/test_host_connection_pool.py
+++ b/tests/unit/test_host_connection_pool.py
@@ -26,7 +26,7 @@
 from mock import Mock, NonCallableMagicMock, MagicMock
 from threading import Thread, Event, Lock
 
-from cassandra.cluster import Session
+from cassandra.cluster import Session, ShardAwareOptions
 from cassandra.connection import Connection
 from cassandra.pool import HostConnection, HostConnectionPool
 from cassandra.pool import Host, NoConnectionsAvailable
@@ -160,6 +160,7 @@ def test_return_defunct_connection_on_down_host(self):
         conn = NonCallableMagicMock(spec=Connection, in_flight=0, is_defunct=False, is_closed=False,
                                     max_request_id=100, signaled_error=False)
         session.cluster.connection_factory.return_value = conn
+        session.cluster.shard_aware_options = ShardAwareOptions()
 
         pool = self.PoolImpl(host, HostDistance.LOCAL, session)
         session.cluster.connection_factory.assert_called_once_with(host.endpoint, owning_pool=pool)
diff --git a/tests/unit/test_shard_aware.py b/tests/unit/test_shard_aware.py
index 81bee1d8a8..c05eb51d5d 100644
--- a/tests/unit/test_shard_aware.py
+++ b/tests/unit/test_shard_aware.py
@@ -21,6 +21,7 @@
 from unittest.mock import MagicMock
 from futures.thread import ThreadPoolExecutor
 
+from cassandra.cluster import ShardAwareOptions
 from cassandra.pool import HostConnection, HostDistance
 from cassandra.connection import ShardingInfo, DefaultEndPoint
 from cassandra.metadata import Murmur3Token
@@ -67,10 +68,11 @@ def __init__(self, is_ssl=False, *args, **kwargs):
                     self.cluster.ssl_options = {'some_ssl_options': True}
                 else:
                     self.cluster.ssl_options = None
+
self.cluster.shard_aware_options = ShardAwareOptions()
                 self.cluster.executor = ThreadPoolExecutor(max_workers=2)
                 self.cluster.signal_connection_failure = lambda *args, **kwargs: False
                 self.cluster.connection_factory = self.mock_connection_factory
-                self.connection_counter = -1
+                self.connection_counter = 0
                 self.futures = []
 
             def submit(self, fn, *args, **kwargs):
@@ -87,7 +89,8 @@ def mock_connection_factory(self, *args, **kwargs):
                 connection.is_closed = False
                 connection.orphaned_threshold_reached = False
                 connection.endpoint = args[0]
-                connection.shard_id = kwargs.get('shard_id', 0)
+                connection.shard_id = kwargs.get('shard_id', self.connection_counter)
+                self.connection_counter += 1
                 connection.sharding_info = ShardingInfo(shard_id=1, shards_count=4,
                                                         partitioner="", sharding_algorithm="", sharding_ignore_msb=0,
                                                         shard_aware_port=19042, shard_aware_port_ssl=19045)

From acc72638d024f3c942566044bd212c252dadc919 Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Wed, 9 Mar 2022 14:18:25 +0200
Subject: [PATCH 016/551] Release 3.25.2

---
 cassandra/__init__.py | 2 +-
 docs/conf.py          | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/cassandra/__init__.py b/cassandra/__init__.py
index 84a7de11a5..e550cfb2d2 100644
--- a/cassandra/__init__.py
+++ b/cassandra/__init__.py
@@ -22,7 +22,7 @@ def emit(self, record):
 
 logging.getLogger('cassandra').addHandler(NullHandler())
 
-__version_info__ = (3, 25, 1)
+__version_info__ = (3, 25, 2)
 __version__ = '.'.join(map(str, __version_info__))
 
 
diff --git a/docs/conf.py b/docs/conf.py
index db71285cea..206b152cb8 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -64,14 +64,14 @@
 
 # -- Options for multiversion --------------------------------------------------
 # Whitelist pattern for tags (set to None to ignore all tags)
-TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.1-scylla']
+TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.2-scylla']
 smv_tag_whitelist = multiversion_regex_builder(TAGS)
 # Whitelist pattern for branches (set to None to ignore all branches)
 BRANCHES = ['master']
 smv_branch_whitelist = multiversion_regex_builder(BRANCHES)
 # Defines which version is considered to be the latest stable version.
 # Must be listed in smv_tag_whitelist or smv_branch_whitelist.
-smv_latest_version = '3.25.1-scylla'
+smv_latest_version = '3.25.2-scylla'
 smv_rename_latest_version = 'stable'
 # Whitelist pattern for remotes (set to None to use local branches only)
 smv_remote_whitelist = r"^origin$"

From 8c4c6536d4728e052771651120df6cbb2e730773 Mon Sep 17 00:00:00 2001
From: Andy Salnikov
Date: Wed, 16 Mar 2022 13:19:03 -0700
Subject: [PATCH 017/551] Merge pull request #1122 from
 andy-slac/concurrent-execution-profiles

Adds one more keyword argument `execution_profile` to the
`execute_concurrent` method to pass an execution profile. It is
forwarded to the `Session.execute_async` call.
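
For illustration, a minimal sketch of how the new keyword can be used;
the profile name "analytics" and the table queried below are made-up
examples, not part of this change:

```python
from cassandra.cluster import Cluster, ExecutionProfile
from cassandra.concurrent import execute_concurrent_with_args
from cassandra.query import tuple_factory

# "analytics" is a hypothetical profile name, purely for illustration.
cluster = Cluster(execution_profiles={
    "analytics": ExecutionProfile(row_factory=tuple_factory)})
session = cluster.connect()

statement = session.prepare("SELECT v FROM test3rf.test WHERE k=?")
parameters = [(i,) for i in range(10)]

# The new keyword is forwarded to Session.execute_async for every statement.
results = execute_concurrent_with_args(
    session, statement, parameters, execution_profile="analytics")
```
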
--- cassandra/concurrent.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index a8bddcbdab..0228f297fe 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -21,7 +21,7 @@ from threading import Condition import sys -from cassandra.cluster import ResultSet +from cassandra.cluster import ResultSet, EXEC_PROFILE_DEFAULT import logging log = logging.getLogger(__name__) @@ -29,7 +29,7 @@ ExecutionResult = namedtuple('ExecutionResult', ['success', 'result_or_exc']) -def execute_concurrent(session, statements_and_parameters, concurrency=100, raise_on_first_error=True, results_generator=False): +def execute_concurrent(session, statements_and_parameters, concurrency=100, raise_on_first_error=True, results_generator=False, execution_profile=EXEC_PROFILE_DEFAULT): """ Executes a sequence of (statement, parameters) tuples concurrently. Each ``parameters`` item must be a sequence or :const:`None`. @@ -56,6 +56,9 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais footprint is marginal CPU overhead (more thread coordination and sorting out-of-order results on-the-fly). + `execution_profile` argument is the execution profile to use for this + request, it is passed directly to :meth:`Session.execute_async`. + A sequence of ``ExecutionResult(success, result_or_exc)`` namedtuples is returned in the same order that the statements were passed in. If ``success`` is :const:`False`, there was an error executing the statement, and ``result_or_exc`` will be @@ -90,7 +93,8 @@ def execute_concurrent(session, statements_and_parameters, concurrency=100, rais if not statements_and_parameters: return [] - executor = ConcurrentExecutorGenResults(session, statements_and_parameters) if results_generator else ConcurrentExecutorListResults(session, statements_and_parameters) + executor = ConcurrentExecutorGenResults(session, statements_and_parameters, execution_profile) \ + if results_generator else ConcurrentExecutorListResults(session, statements_and_parameters, execution_profile) return executor.execute(concurrency, raise_on_first_error) @@ -98,9 +102,10 @@ class _ConcurrentExecutor(object): max_error_recursion = 100 - def __init__(self, session, statements_and_params): + def __init__(self, session, statements_and_params, execution_profile): self.session = session self._enum_statements = enumerate(iter(statements_and_params)) + self._execution_profile = execution_profile self._condition = Condition() self._fail_fast = False self._results_queue = [] @@ -132,7 +137,7 @@ def _execute_next(self): def _execute(self, idx, statement, params): self._exec_depth += 1 try: - future = self.session.execute_async(statement, params, timeout=None) + future = self.session.execute_async(statement, params, timeout=None, execution_profile=self._execution_profile) args = (future, idx) future.add_callbacks( callback=self._on_success, callback_args=args, From 9e4904917dcc895c96daafbfe7cc215f101e8f8c Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 16 Mar 2022 15:24:32 -0500 Subject: [PATCH 018/551] Add tests for recent addition of execution profile support to cassandra.concurrent --- tests/integration/standard/test_concurrent.py | 66 +++++++++++-------- 1 file changed, 38 insertions(+), 28 deletions(-) diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index ad4ef47473..15da526bde 100644 --- a/tests/integration/standard/test_concurrent.py +++ 
b/tests/integration/standard/test_concurrent.py @@ -20,7 +20,7 @@ from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.concurrent import execute_concurrent, execute_concurrent_with_args, ExecutionResult from cassandra.policies import HostDistance -from cassandra.query import tuple_factory, SimpleStatement +from cassandra.query import dict_factory, tuple_factory, SimpleStatement from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster @@ -35,13 +35,16 @@ def setup_module(): use_singledc() +EXEC_PROFILE_DICT = "dict" + class ClusterTests(unittest.TestCase): @classmethod def setUpClass(cls): cls.cluster = TestCluster( execution_profiles = { - EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory) + EXEC_PROFILE_DEFAULT: ExecutionProfile(row_factory=tuple_factory), + EXEC_PROFILE_DICT: ExecutionProfile(row_factory=dict_factory) } ) if PROTOCOL_VERSION < 3: @@ -52,11 +55,11 @@ def setUpClass(cls): def tearDownClass(cls): cls.cluster.shutdown() - def execute_concurrent_helper(self, session, query, results_generator=False): + def execute_concurrent_helper(self, session, query, **kwargs): count = 0 while count < 100: try: - return execute_concurrent(session, query, results_generator=False) + return execute_concurrent(session, query, results_generator=False, **kwargs) except (ReadTimeout, WriteTimeout, OperationTimedOut, ReadFailure, WriteFailure): ex_type, ex, tb = sys.exc_info() log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) @@ -65,11 +68,11 @@ def execute_concurrent_helper(self, session, query, results_generator=False): raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query)) - def execute_concurrent_args_helper(self, session, query, params, results_generator=False): + def execute_concurrent_args_helper(self, session, query, params, results_generator=False, **kwargs): count = 0 while count < 100: try: - return execute_concurrent_with_args(session, query, params, results_generator=results_generator) + return execute_concurrent_with_args(session, query, params, results_generator=results_generator, **kwargs) except (ReadTimeout, WriteTimeout, OperationTimedOut, ReadFailure, WriteFailure): ex_type, ex, tb = sys.exc_info() log.warning("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb))) @@ -77,7 +80,7 @@ def execute_concurrent_args_helper(self, session, query, params, results_generat raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query)) - def test_execute_concurrent(self): + def execute_concurrent_base(self, test_fn, validate_fn, zip_args=True): for num_statements in (0, 1, 2, 7, 10, 99, 100, 101, 199, 200, 201): # write statement = SimpleStatement( @@ -86,7 +89,9 @@ def test_execute_concurrent(self): statements = cycle((statement, )) parameters = [(i, i) for i in range(num_statements)] - results = self.execute_concurrent_helper(self.session, list(zip(statements, parameters))) + results = \ + test_fn(self.session, list(zip(statements, parameters))) if zip_args else \ + test_fn(self.session, statement, parameters) self.assertEqual(num_statements, len(results)) for success, result in results: self.assertTrue(success) @@ -99,32 +104,37 @@ def test_execute_concurrent(self): statements = cycle((statement, )) parameters = [(i, ) for i in range(num_statements)] - results = self.execute_concurrent_helper(self.session, list(zip(statements, parameters))) + results = \ + test_fn(self.session, list(zip(statements, 
parameters))) if zip_args else \ + test_fn(self.session, statement, parameters) + validate_fn(num_statements, results) + + def execute_concurrent_valiate_tuple(self, num_statements, results): self.assertEqual(num_statements, len(results)) self.assertEqual([(True, [(i,)]) for i in range(num_statements)], results) - def test_execute_concurrent_with_args(self): - for num_statements in (0, 1, 2, 7, 10, 99, 100, 101, 199, 200, 201): - statement = SimpleStatement( - "INSERT INTO test3rf.test (k, v) VALUES (%s, %s)", - consistency_level=ConsistencyLevel.QUORUM) - parameters = [(i, i) for i in range(num_statements)] - - results = self.execute_concurrent_args_helper(self.session, statement, parameters) + def execute_concurrent_valiate_dict(self, num_statements, results): self.assertEqual(num_statements, len(results)) - for success, result in results: - self.assertTrue(success) - self.assertFalse(result) + self.assertEqual([(True, [{"v":i}]) for i in range(num_statements)], results) - # read - statement = SimpleStatement( - "SELECT v FROM test3rf.test WHERE k=%s", - consistency_level=ConsistencyLevel.QUORUM) - parameters = [(i, ) for i in range(num_statements)] + def test_execute_concurrent(self): + self.execute_concurrent_base(self.execute_concurrent_helper, \ + self.execute_concurrent_valiate_tuple) - results = self.execute_concurrent_args_helper(self.session, statement, parameters) - self.assertEqual(num_statements, len(results)) - self.assertEqual([(True, [(i,)]) for i in range(num_statements)], results) + def test_execute_concurrent_with_args(self): + self.execute_concurrent_base(self.execute_concurrent_args_helper, \ + self.execute_concurrent_valiate_tuple, \ + zip_args=False) + + def test_execute_concurrent_with_execution_profile(self): + def run_fn(*args, **kwargs): + return self.execute_concurrent_helper(*args, execution_profile=EXEC_PROFILE_DICT, **kwargs) + self.execute_concurrent_base(run_fn, self.execute_concurrent_valiate_dict) + + def test_execute_concurrent_with_args_and_execution_profile(self): + def run_fn(*args, **kwargs): + return self.execute_concurrent_args_helper(*args, execution_profile=EXEC_PROFILE_DICT, **kwargs) + self.execute_concurrent_base(run_fn, self.execute_concurrent_valiate_dict, zip_args=False) def test_execute_concurrent_with_args_generator(self): """ From a7295e103023e12152fc0940906071b18356def3 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Thu, 17 Mar 2022 14:06:08 -0500 Subject: [PATCH 019/551] PYTHON-1294: Upgrade importlib-metadata to a much newer version --- .travis.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 7e1e374822..906775e90c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,6 +7,7 @@ python: - "3.5" - "3.6" - "3.7" + - "3.8" - "pypy2.7-6.0" - "pypy3.5" @@ -24,7 +25,7 @@ addons: - libev-dev install: - - pip install --upgrade setuptools + - pip install --upgrade setuptools importlib-metadata - pip install tox-travis - if [[ $TRAVIS_PYTHON_VERSION != pypy3.5 ]]; then pip install lz4; fi From 6ad836c8d1d4c37592223e58fae3c5dfe93c4a81 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Sun, 27 Mar 2022 10:17:08 +0300 Subject: [PATCH 020/551] Metadata/Schema paginated queries New Cluster property `schema_metadata_page_size` that controls the page size of metadata queries, defaults to 1000. 
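
A minimal sketch of tuning it (the contact point and page size values
below are arbitrary examples, not defaults mandated by this change):

```python
from cassandra.cluster import Cluster

# Fetch schema metadata 500 rows at a time instead of the default 1000.
cluster = Cluster(contact_points=["127.0.0.1"], schema_metadata_page_size=500)
session = cluster.connect()

# The page size is also exposed as a writable property on the cluster.
cluster.schema_metadata_page_size = 2000
```
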
Works only on CQL protocol v3/v4 Fixes: #139 --- cassandra/cluster.py | 22 +++- cassandra/metadata.py | 137 +++++++++++--------- tests/integration/standard/test_metadata.py | 10 ++ 3 files changed, 108 insertions(+), 61 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 6894f1a6c0..c81c7835a9 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1038,6 +1038,17 @@ def schema_metadata_enabled(self): def schema_metadata_enabled(self, enabled): self.control_connection._schema_meta_enabled = bool(enabled) + @property + def schema_metadata_page_size(self): + """ + Number controling page size when schema metadata is fetched. + """ + return self.control_connection._schema_meta_page_size + + @schema_metadata_page_size.setter + def schema_metadata_page_size(self, size): + self.control_connection._schema_meta_page_size = size + @property def token_metadata_enabled(self): """ @@ -1108,6 +1119,7 @@ def __init__(self, connect_timeout=5, schema_metadata_enabled=True, token_metadata_enabled=True, + schema_metadata_page_size=1000, address_translator=None, status_event_refresh_window=2, prepare_on_all_hosts=True, @@ -1373,7 +1385,8 @@ def __init__(self, self, self.control_connection_timeout, self.schema_event_refresh_window, self.topology_event_refresh_window, self.status_event_refresh_window, - schema_metadata_enabled, token_metadata_enabled) + schema_metadata_enabled, token_metadata_enabled, + schema_meta_page_size=schema_metadata_page_size) if client_id is None: self.client_id = uuid.uuid4() @@ -3485,6 +3498,7 @@ class PeersQueryType(object): _schema_meta_enabled = True _token_meta_enabled = True + _schema_meta_page_size = 1000 _uses_peers_v2 = True @@ -3496,7 +3510,8 @@ def __init__(self, cluster, timeout, topology_event_refresh_window, status_event_refresh_window, schema_meta_enabled=True, - token_meta_enabled=True): + token_meta_enabled=True, + schema_meta_page_size=1000): # use a weak reference to allow the Cluster instance to be GC'ed (and # shutdown) since implementing __del__ disables the cycle detector self._cluster = weakref.proxy(cluster) @@ -3508,6 +3523,7 @@ def __init__(self, cluster, timeout, self._status_event_refresh_window = status_event_refresh_window self._schema_meta_enabled = schema_meta_enabled self._token_meta_enabled = token_meta_enabled + self._schema_meta_page_size = schema_meta_page_size self._lock = RLock() self._schema_agreement_lock = Lock() @@ -3732,7 +3748,7 @@ def _refresh_schema(self, connection, preloaded_results=None, schema_agreement_w log.debug("Skipping schema refresh due to lack of schema agreement") return False - self._cluster.metadata.refresh(connection, self._timeout, **kwargs) + self._cluster.metadata.refresh(connection, self._timeout, fetch_size=self._schema_meta_page_size, **kwargs) return True diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 131900b323..82eecccc21 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -26,6 +26,7 @@ from threading import RLock import struct import random +import itertools murmur3 = None try: @@ -132,11 +133,11 @@ def export_schema_as_string(self): """ return "\n\n".join(ks.export_as_string() for ks in self.keyspaces.values()) - def refresh(self, connection, timeout, target_type=None, change_type=None, **kwargs): + def refresh(self, connection, timeout, target_type=None, change_type=None, fetch_size=None, **kwargs): server_version = self.get_host(connection.original_endpoint).release_version dse_version = self.get_host(connection.original_endpoint).dse_version - 
parser = get_schema_parser(connection, server_version, dse_version, timeout) + parser = get_schema_parser(connection, server_version, dse_version, timeout, fetch_size) if not target_type: self._rebuild_all(parser) @@ -1924,7 +1925,7 @@ def __init__(self, connection, timeout): self.connection = connection self.timeout = timeout - def _handle_results(self, success, result, expected_failures=tuple()): + def _handle_results(self, success, result, expected_failures=tuple(), query_msg=None, timeout=None): """ Given a bool and a ResultSet (the form returned per result from Connection.wait_for_responses), return a dictionary containing the @@ -1945,9 +1946,26 @@ def _handle_results(self, success, result, expected_failures=tuple()): query failed, but raised an instance of an expected failure class, this will ignore the failure and return an empty list. """ + timeout = timeout or self.timeout if not success and isinstance(result, expected_failures): return [] elif success: + if result.paging_state and query_msg: + def get_next_pages(): + next_result = None + while True: + query_msg.paging_state = next_result.paging_state if next_result else result.paging_state + next_success, next_result = self.connection.wait_for_response(query_msg, timeout=timeout, + fail_on_error=False) + if not next_success and isinstance(next_result, expected_failures): + continue + elif not next_success: + raise next_result + if not next_result.paging_state: + break + yield next_result.parsed_rows + + result.parsed_rows += itertools.chain(*get_next_pages()) return dict_factory(result.column_names, result.parsed_rows) if result else [] else: raise result @@ -2532,8 +2550,9 @@ class SchemaParserV3(SchemaParserV22): 'read_repair_chance', 'speculative_retry') - def __init__(self, connection, timeout): + def __init__(self, connection, timeout, fetch_size): super(SchemaParserV3, self).__init__(connection, timeout) + self.fetch_size = fetch_size self.indexes_result = [] self.keyspace_table_index_rows = defaultdict(lambda: defaultdict(list)) self.keyspace_view_rows = defaultdict(list) @@ -2726,17 +2745,18 @@ def _build_trigger_metadata(table_metadata, row): def _query_all(self): cl = ConsistencyLevel.ONE + fetch_size = self.fetch_size queries = [ - QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl), - QueryMessage(query=self._SELECT_TABLES, consistency_level=cl), - QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl), - QueryMessage(query=self._SELECT_TYPES, consistency_level=cl), - QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl), - QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl), - QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl), - QueryMessage(query=self._SELECT_INDEXES, consistency_level=cl), - QueryMessage(query=self._SELECT_VIEWS, consistency_level=cl), - QueryMessage(query=self._SELECT_SCYLLA, consistency_level=cl) + QueryMessage(query=self._SELECT_KEYSPACES, fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=self._SELECT_TABLES, fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=self._SELECT_COLUMNS, fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=self._SELECT_TYPES, fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=self._SELECT_FUNCTIONS, fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=self._SELECT_AGGREGATES, fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=self._SELECT_TRIGGERS, fetch_size=fetch_size, consistency_level=cl), + 
QueryMessage(query=self._SELECT_INDEXES, fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=self._SELECT_VIEWS, fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=self._SELECT_SCYLLA, fetch_size=fetch_size, consistency_level=cl) ] ((ks_success, ks_result), @@ -2752,16 +2772,16 @@ def _query_all(self): *queries, timeout=self.timeout, fail_on_error=False ) - self.keyspaces_result = self._handle_results(ks_success, ks_result) - self.tables_result = self._handle_results(table_success, table_result) - self.columns_result = self._handle_results(col_success, col_result) - self.triggers_result = self._handle_results(triggers_success, triggers_result) - self.types_result = self._handle_results(types_success, types_result) - self.functions_result = self._handle_results(functions_success, functions_result) - self.aggregates_result = self._handle_results(aggregates_success, aggregates_result) - self.indexes_result = self._handle_results(indexes_success, indexes_result) - self.views_result = self._handle_results(views_success, views_result) - self.scylla_result = self._handle_results(scylla_success, scylla_result, expected_failures=(InvalidRequest,)) + self.keyspaces_result = self._handle_results(ks_success, ks_result, query_msg=queries[0]) + self.tables_result = self._handle_results(table_success, table_result, query_msg=queries[1]) + self.columns_result = self._handle_results(col_success, col_result, query_msg=queries[2]) + self.triggers_result = self._handle_results(triggers_success, triggers_result, query_msg=queries[6]) + self.types_result = self._handle_results(types_success, types_result, query_msg=queries[3]) + self.functions_result = self._handle_results(functions_success, functions_result, query_msg=queries[4]) + self.aggregates_result = self._handle_results(aggregates_success, aggregates_result, query_msg=queries[5]) + self.indexes_result = self._handle_results(indexes_success, indexes_result, query_msg=queries[7]) + self.views_result = self._handle_results(views_success, views_result, query_msg=queries[8]) + self.scylla_result = self._handle_results(scylla_success, scylla_result, expected_failures=(InvalidRequest,), query_msg=queries[9]) self._aggregate_results() @@ -2814,8 +2834,8 @@ class SchemaParserV4(SchemaParserV3): _SELECT_VIRTUAL_TABLES = 'SELECT * from system_virtual_schema.tables' _SELECT_VIRTUAL_COLUMNS = 'SELECT * from system_virtual_schema.columns' - def __init__(self, connection, timeout): - super(SchemaParserV4, self).__init__(connection, timeout) + def __init__(self, connection, timeout, fetch_size): + super(SchemaParserV4, self).__init__(connection, timeout, fetch_size) self.virtual_keyspaces_rows = defaultdict(list) self.virtual_tables_rows = defaultdict(list) self.virtual_columns_rows = defaultdict(lambda: defaultdict(list)) @@ -2824,21 +2844,22 @@ def _query_all(self): cl = ConsistencyLevel.ONE # todo: this duplicates V3; we should find a way for _query_all methods # to extend each other. 
+ fetch_size = self.fetch_size queries = [ # copied from V3 - QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl), - QueryMessage(query=self._SELECT_TABLES, consistency_level=cl), - QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl), - QueryMessage(query=self._SELECT_TYPES, consistency_level=cl), - QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl), - QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl), - QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl), - QueryMessage(query=self._SELECT_INDEXES, consistency_level=cl), - QueryMessage(query=self._SELECT_VIEWS, consistency_level=cl), + QueryMessage(query=self._SELECT_KEYSPACES, fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=self._SELECT_TABLES, fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=self._SELECT_COLUMNS, fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=self._SELECT_TYPES, fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=self._SELECT_FUNCTIONS, fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=self._SELECT_AGGREGATES, fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=self._SELECT_TRIGGERS, fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=self._SELECT_INDEXES, fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=self._SELECT_VIEWS, fetch_size=fetch_size, consistency_level=cl), # V4-only queries - QueryMessage(query=self._SELECT_VIRTUAL_KEYSPACES, consistency_level=cl), - QueryMessage(query=self._SELECT_VIRTUAL_TABLES, consistency_level=cl), - QueryMessage(query=self._SELECT_VIRTUAL_COLUMNS, consistency_level=cl) + QueryMessage(query=self._SELECT_VIRTUAL_KEYSPACES, fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=self._SELECT_VIRTUAL_TABLES, fetch_size=fetch_size, consistency_level=cl), + QueryMessage(query=self._SELECT_VIRTUAL_COLUMNS, fetch_size=fetch_size, consistency_level=cl) ] responses = self.connection.wait_for_responses( @@ -2861,29 +2882,29 @@ def _query_all(self): ) = responses # copied from V3 - self.keyspaces_result = self._handle_results(ks_success, ks_result) - self.tables_result = self._handle_results(table_success, table_result) - self.columns_result = self._handle_results(col_success, col_result) - self.triggers_result = self._handle_results(triggers_success, triggers_result) - self.types_result = self._handle_results(types_success, types_result) - self.functions_result = self._handle_results(functions_success, functions_result) - self.aggregates_result = self._handle_results(aggregates_success, aggregates_result) - self.indexes_result = self._handle_results(indexes_success, indexes_result) - self.views_result = self._handle_results(views_success, views_result) + self.keyspaces_result = self._handle_results(ks_success, ks_result, query_msg=queries[0]) + self.tables_result = self._handle_results(table_success, table_result, query_msg=queries[1]) + self.columns_result = self._handle_results(col_success, col_result, query_msg=queries[2]) + self.triggers_result = self._handle_results(triggers_success, triggers_result, query_msg=queries[6]) + self.types_result = self._handle_results(types_success, types_result, query_msg=queries[3]) + self.functions_result = self._handle_results(functions_success, functions_result, query_msg=queries[4]) + self.aggregates_result = self._handle_results(aggregates_success, aggregates_result, query_msg=queries[5]) + self.indexes_result = 
self._handle_results(indexes_success, indexes_result, query_msg=queries[7]) + self.views_result = self._handle_results(views_success, views_result, query_msg=queries[8]) # V4-only results # These tables don't exist in some DSE versions reporting 4.X so we can # ignore them if we got an error self.virtual_keyspaces_result = self._handle_results( virtual_ks_success, virtual_ks_result, - expected_failures=(InvalidRequest,) + expected_failures=(InvalidRequest,), query_msg=queries[9] ) self.virtual_tables_result = self._handle_results( virtual_table_success, virtual_table_result, - expected_failures=(InvalidRequest,) + expected_failures=(InvalidRequest,), query_msg=queries[10] ) self.virtual_columns_result = self._handle_results( virtual_column_success, virtual_column_result, - expected_failures=(InvalidRequest,) + expected_failures=(InvalidRequest,), query_msg=queries[11] ) self._aggregate_results() @@ -2948,8 +2969,8 @@ class SchemaParserDSE68(SchemaParserDSE67): _table_metadata_class = TableMetadataDSE68 - def __init__(self, connection, timeout): - super(SchemaParserDSE68, self).__init__(connection, timeout) + def __init__(self, connection, timeout, fetch_size): + super(SchemaParserDSE68, self).__init__(connection, timeout, fetch_size) self.keyspace_table_vertex_rows = defaultdict(lambda: defaultdict(list)) self.keyspace_table_edge_rows = defaultdict(lambda: defaultdict(list)) @@ -3314,21 +3335,21 @@ def __init__( self.to_clustering_columns = to_clustering_columns -def get_schema_parser(connection, server_version, dse_version, timeout): +def get_schema_parser(connection, server_version, dse_version, timeout, fetch_size=None): version = Version(server_version) if dse_version: v = Version(dse_version) if v >= Version('6.8.0'): - return SchemaParserDSE68(connection, timeout) + return SchemaParserDSE68(connection, timeout, fetch_size) elif v >= Version('6.7.0'): - return SchemaParserDSE67(connection, timeout) + return SchemaParserDSE67(connection, timeout, fetch_size) elif v >= Version('6.0.0'): - return SchemaParserDSE60(connection, timeout) + return SchemaParserDSE60(connection, timeout, fetch_size) if version >= Version('4-a'): - return SchemaParserV4(connection, timeout) + return SchemaParserV4(connection, timeout, fetch_size) elif version >= Version('3.0.0'): - return SchemaParserV3(connection, timeout) + return SchemaParserV3(connection, timeout, fetch_size) else: # we could further specialize by version. Right now just refactoring the # multi-version parser we have as of C* 2.2.0rc1. 
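As a usage sketch for the page-size knob introduced above (illustrative only, not part of the patch; it assumes a reachable node at 127.0.0.1), the value can be set at construction time or tuned later through the new property:

```python
from cassandra.cluster import Cluster

# schema_metadata_page_size caps how many rows each internal schema query
# fetches per page during a metadata refresh (the default above is 1000).
cluster = Cluster(contact_points=['127.0.0.1'], schema_metadata_page_size=500)
session = cluster.connect()

# The knob can also be changed on a live Cluster; the next refresh re-reads
# the schema tables with the new fetch size.
cluster.schema_metadata_page_size = 100
cluster.refresh_schema_metadata()
```

Lower values trade more round trips for smaller result payloads per page during schema refreshes, which is the point of paginating these queries.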
diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 826707c012..61db69bbed 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1047,6 +1047,16 @@ class Ext1(Ext0): self.assertIn(Ext0.after_table_cql(view_meta, Ext0.name, ext_map[Ext0.name]), new_cql) self.assertIn(Ext1.after_table_cql(view_meta, Ext1.name, ext_map[Ext1.name]), new_cql) + def test_metadata_pagination(self): + self.cluster.refresh_schema_metadata() + for i in range(10): + self.session.execute("CREATE TABLE %s.%s_%d (a int PRIMARY KEY, b map<int, int>)" + % (self.keyspace_name, self.function_table_name, i)) + + self.cluster.schema_metadata_page_size = 5 + self.cluster.refresh_schema_metadata() + self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].tables), 10) + class TestCodeCoverage(unittest.TestCase): From 66046ded8e20006adc968b3d7d32d025e4eab679 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Mon, 28 Mar 2022 18:45:26 +0300 Subject: [PATCH 021/551] Release 3.25.3 --- cassandra/__init__.py | 2 +- docs/conf.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index e550cfb2d2..97acb762e9 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 25, 2) +__version_info__ = (3, 25, 3) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/conf.py b/docs/conf.py index 206b152cb8..bb129a710c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -64,14 +64,14 @@ # -- Options for multiversion -------------------------------------------------- # Whitelist pattern for tags (set to None to ignore all tags) -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.2-scylla'] +TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.3-scylla'] smv_tag_whitelist = multiversion_regex_builder(TAGS) # Whitelist pattern for branches (set to None to ignore all branches) BRANCHES = ['master'] smv_branch_whitelist = multiversion_regex_builder(BRANCHES) # Defines which version is considered to be the latest stable version. # Must be listed in smv_tag_whitelist or smv_branch_whitelist. -smv_latest_version = '3.25.2-scylla' +smv_latest_version = '3.25.3-scylla' smv_rename_latest_version = 'stable' # Whitelist pattern for remotes (set to None to use local branches only) smv_remote_whitelist = r"^origin$" From 989ee1c511201a8c39c3404b8c5d008c1bf2e9cd Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Tue, 29 Mar 2022 10:15:38 +0300 Subject: [PATCH 022/551] Metadata/Schema paginated queries [continuation] It seems that in #140 not all of the queries issued by the metadata code were covered (i.e. paginated), which still showed up in the `scylla_cql_unpaged_select_queries` counter. 
all schema agreement queries are still unpaged: ``` SELECT peer, host_id, rpc_address, schema_version FROM system.peers SELECT schema_version FROM system.local WHERE key='local' ``` --- cassandra/metadata.py | 50 +++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 26 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 82eecccc21..d70ba6dfb9 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1921,9 +1921,10 @@ def export_as_string(self): class _SchemaParser(object): - def __init__(self, connection, timeout): + def __init__(self, connection, timeout, fetch_size): self.connection = connection self.timeout = timeout + self.fetch_size = fetch_size def _handle_results(self, success, result, expected_failures=tuple(), query_msg=None, timeout=None): """ @@ -1975,17 +1976,13 @@ def _query_build_row(self, query_string, build_func): return result[0] if result else None def _query_build_rows(self, query_string, build_func): - query = QueryMessage(query=query_string, consistency_level=ConsistencyLevel.ONE) + query = QueryMessage(query=query_string, consistency_level=ConsistencyLevel.ONE, fetch_size=self.fetch_size) responses = self.connection.wait_for_responses((query), timeout=self.timeout, fail_on_error=False) (success, response) = responses[0] - if success: - result = dict_factory(response.column_names, response.parsed_rows) - return [build_func(row) for row in result] - elif isinstance(response, InvalidRequest): + results = self._handle_results(success, response, expected_failures=(InvalidRequest), query_msg=query) + if not results: log.debug("user types table not found") - return [] - else: - raise response + return [build_func(row) for row in results] class SchemaParserV22(_SchemaParser): @@ -2029,8 +2026,8 @@ class SchemaParserV22(_SchemaParser): "compression", "default_time_to_live") - def __init__(self, connection, timeout): - super(SchemaParserV22, self).__init__(connection, timeout) + def __init__(self, connection, timeout, fetch_size): + super(SchemaParserV22, self).__init__(connection, timeout, fetch_size) self.keyspaces_result = [] self.tables_result = [] self.columns_result = [] @@ -2551,8 +2548,7 @@ class SchemaParserV3(SchemaParserV22): 'speculative_retry') def __init__(self, connection, timeout, fetch_size): - super(SchemaParserV3, self).__init__(connection, timeout) - self.fetch_size = fetch_size + super(SchemaParserV3, self).__init__(connection, timeout, fetch_size) self.indexes_result = [] self.keyspace_table_index_rows = defaultdict(lambda: defaultdict(list)) self.keyspace_view_rows = defaultdict(list) @@ -2566,17 +2562,18 @@ def get_all_keyspaces(self): def get_table(self, keyspaces, keyspace, table): cl = ConsistencyLevel.ONE + fetch_size = self.fetch_size where_clause = bind_params(" WHERE keyspace_name = %%s AND %s = %%s" % (self._table_name_col), (keyspace, table), _encoder) - cf_query = QueryMessage(query=self._SELECT_TABLES + where_clause, consistency_level=cl) - col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl) - indexes_query = QueryMessage(query=self._SELECT_INDEXES + where_clause, consistency_level=cl) - triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl) - scylla_query = QueryMessage(query=self._SELECT_SCYLLA + where_clause, consistency_level=cl) + cf_query = QueryMessage(query=self._SELECT_TABLES + where_clause, consistency_level=cl, fetch_size=fetch_size) + col_query = QueryMessage(query=self._SELECT_COLUMNS + 
where_clause, consistency_level=cl, fetch_size=fetch_size) + indexes_query = QueryMessage(query=self._SELECT_INDEXES + where_clause, consistency_level=cl, fetch_size=fetch_size) + triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl, fetch_size=fetch_size) + scylla_query = QueryMessage(query=self._SELECT_SCYLLA + where_clause, consistency_level=cl, fetch_size=fetch_size) # in protocol v4 we don't know if this event is a view or a table, so we look for both where_clause = bind_params(" WHERE keyspace_name = %s AND view_name = %s", (keyspace, table), _encoder) view_query = QueryMessage(query=self._SELECT_VIEWS + where_clause, - consistency_level=cl) + consistency_level=cl, fetch_size=fetch_size) ((cf_success, cf_result), (col_success, col_result), (indexes_sucess, indexes_result), (triggers_success, triggers_result), (view_success, view_result), @@ -2585,14 +2582,15 @@ def get_table(self, keyspaces, keyspace, table): cf_query, col_query, indexes_query, triggers_query, view_query, scylla_query, timeout=self.timeout, fail_on_error=False) ) - table_result = self._handle_results(cf_success, cf_result) - col_result = self._handle_results(col_success, col_result) + table_result = self._handle_results(cf_success, cf_result, query_msg=cf_query) + col_result = self._handle_results(col_success, col_result, query_msg=col_query) if table_result: - indexes_result = self._handle_results(indexes_sucess, indexes_result) - triggers_result = self._handle_results(triggers_success, triggers_result) + indexes_result = self._handle_results(indexes_sucess, indexes_result, query_msg=indexes_query) + triggers_result = self._handle_results(triggers_success, triggers_result, query_msg=triggers_query) # in_memory property is stored in scylla private table # add it to table properties if enabled - scylla_result = self._handle_results(scylla_success, scylla_result, expected_failures=(InvalidRequest,)) + scylla_result = self._handle_results(scylla_success, scylla_result, expected_failures=(InvalidRequest,), + query_msg=scylla_query) try: if scylla_result[0]["in_memory"] == True: table_result[0]["in_memory"] = True @@ -2600,7 +2598,7 @@ def get_table(self, keyspaces, keyspace, table): pass return self._build_table_metadata(table_result[0], col_result, triggers_result, indexes_result) - view_result = self._handle_results(view_success, view_result) + view_result = self._handle_results(view_success, view_result, query_msg=view_query) if view_result: return self._build_view_metadata(view_result[0], col_result) @@ -3353,7 +3351,7 @@ def get_schema_parser(connection, server_version, dse_version, timeout, fetch_si else: # we could further specialize by version. Right now just refactoring the # multi-version parser we have as of C* 2.2.0rc1. 
- return SchemaParserV22(connection, timeout) + return SchemaParserV22(connection, timeout, fetch_size) def _cql_from_cass_type(cass_type): From 439de0027dfcd5e15650ee2a7323e624cc16a935 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Tue, 29 Mar 2022 22:54:25 +0300 Subject: [PATCH 023/551] Release 3.25.4 --- cassandra/__init__.py | 2 +- docs/conf.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 97acb762e9..4966da3aaf 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 25, 3) +__version_info__ = (3, 25, 4) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/conf.py b/docs/conf.py index bb129a710c..ffceb0a3a4 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -64,14 +64,14 @@ # -- Options for multiversion -------------------------------------------------- # Whitelist pattern for tags (set to None to ignore all tags) -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.3-scylla'] +TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla'] smv_tag_whitelist = multiversion_regex_builder(TAGS) # Whitelist pattern for branches (set to None to ignore all branches) BRANCHES = ['master'] smv_branch_whitelist = multiversion_regex_builder(BRANCHES) # Defines which version is considered to be the latest stable version. # Must be listed in smv_tag_whitelist or smv_branch_whitelist. -smv_latest_version = '3.25.3-scylla' +smv_latest_version = '3.25.4-scylla' smv_rename_latest_version = 'stable' # Whitelist pattern for remotes (set to None to use local branches only) smv_remote_whitelist = r"^origin$" From c7692ab8a66c0e5d4b370c75669bff725e353dc3 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Fri, 1 Apr 2022 12:17:52 +0100 Subject: [PATCH 024/551] docs: update theme 1.2.1 Update extensions Lint conf.py Fix CI warning Fix CI warning Fix warning Fix warning --- .github/workflows/docs-links.yaml | 34 +++++++++++++++++++ .github/workflows/docs-pages.yaml | 12 +++---- .github/workflows/docs-pr.yaml | 12 +++---- .lycheeignore | 1 + docs/Makefile | 55 +++++++++++++++++-------------- docs/conf.py | 45 +++++++++++++++++++------ docs/pyproject.toml | 12 +++---- 7 files changed, 119 insertions(+), 52 deletions(-) create mode 100644 .github/workflows/docs-links.yaml create mode 100644 .lycheeignore diff --git a/.github/workflows/docs-links.yaml b/.github/workflows/docs-links.yaml new file mode 100644 index 0000000000..966c95a7a8 --- /dev/null +++ b/.github/workflows/docs-links.yaml @@ -0,0 +1,34 @@ +name: "Docs / Links" +# For more information, +# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows + +on: + workflow_dispatch: + schedule: + - cron: "0 0 * * 0" # At 00:00 on Sunday + +jobs: + linkChecker: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + persist-credentials: false + fetch-depth: 0 + + - name: Link Checker + id: lychee + uses: lycheeverse/lychee-action@v1.4.1 + with: + args: --verbose --no-progress './**/*.md' './**/*.rst' + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + + - name: Create Issue From File + if: ${{ steps.lychee.outputs.exit_code != 0 }} + uses: peter-evans/create-issue-from-file@v4 + with: + title: Link Checker Report + content-filepath: ./lychee/out.md + labels: report, automated issue diff --git a/.github/workflows/docs-pages.yaml 
b/.github/workflows/docs-pages.yaml index 889affa11a..5965790c6f 100644 --- a/.github/workflows/docs-pages.yaml +++ b/.github/workflows/docs-pages.yaml @@ -1,4 +1,6 @@ name: "Docs / Publish" +# For more information, +# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows on: push: @@ -13,20 +15,18 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: persist-credentials: false fetch-depth: 0 - name: Set up Python - uses: actions/setup-python@v2.3.2 + uses: actions/setup-python@v3 with: python-version: 3.7 - - name: Setup Cassandra dependencies - run: sudo apt-get install gcc python-dev libev4 libev-dev + - name: Set up env + run: make -C docs setupenv - name: Build driver run: python setup.py develop - - name: Set up Poetry - run: curl -sSL https://install.python-poetry.org | python - - name: Build docs run: make -C docs multiversion - name: Deploy docs to GitHub Pages diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml index e4d3366f79..203d41aed5 100644 --- a/.github/workflows/docs-pr.yaml +++ b/.github/workflows/docs-pr.yaml @@ -1,4 +1,6 @@ name: "Docs / Build PR" +# For more information, +# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows on: pull_request: @@ -12,19 +14,17 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: persist-credentials: false fetch-depth: 0 - name: Set up Python - uses: actions/setup-python@v2.3.2 + uses: actions/setup-python@v3 with: python-version: 3.7 - - name: Setup Cassandra dependencies - run: sudo apt-get install gcc python-dev libev4 libev-dev + - name: Set up env + run: make -C docs setupenv - name: Build driver run: python setup.py develop - - name: Set up Poetry - run: curl -sSL https://install.python-poetry.org | python - - name: Build docs run: make -C docs test diff --git a/.lycheeignore b/.lycheeignore new file mode 100644 index 0000000000..dce392204c --- /dev/null +++ b/.lycheeignore @@ -0,0 +1 @@ +http://127.0.0.1 \ No newline at end of file diff --git a/docs/Makefile b/docs/Makefile index 3423b9e723..c6b8b5c53a 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -1,40 +1,47 @@ +# Global variables # You can set these variables from the command line. -POETRY = $(HOME)/.local/bin/poetry +POETRY = poetry SPHINXOPTS = SPHINXBUILD = $(POETRY) run sphinx-build PAPER = BUILDDIR = _build SOURCEDIR = . -# Internal variables. 
+# Internal variables PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCEDIR) TESTSPHINXOPTS = $(ALLSPHINXOPTS) -W --keep-going -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCEDIR) +# Windows variables +ifeq ($(OS),Windows_NT) + POETRY = $(APPDATA)\Python\Scripts\poetry +endif .PHONY: all all: dirhtml -.PHONY: pristine -pristine: clean - git clean -dfX +# Setup commands +.PHONY: setupenv +setupenv: + pip install -q poetry + sudo apt-get install gcc python-dev libev4 libev-dev .PHONY: setup setup: $(POETRY) install $(POETRY) update +# Clean commands +.PHONY: pristine +pristine: clean + git clean -dfX + .PHONY: clean clean: rm -rf $(BUILDDIR)/* -.PHONY: preview -preview: setup - $(POETRY) run sphinx-autobuild -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml --port 5500 - +# Generate output commands .PHONY: dirhtml dirhtml: setup $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @@ -48,39 +55,39 @@ singlehtml: setup @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." .PHONY: epub -epub: setup +epub: setup $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." .PHONY: epub3 -epub3: setup +epub3: setup $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 @echo @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." -.PHONY: dummy -dummy: setup - $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy - @echo - @echo "Build finished. Dummy builder generates no files." - -.PHONY: linkcheck -linkcheck: setup - $(SPHINXBUILD) -b linkcheck $(SOURCEDIR) $(BUILDDIR)/linkcheck - .PHONY: multiversion multiversion: setup $(POETRY) run sphinx-multiversion $(SOURCEDIR) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." +# Preview commands +.PHONY: preview +preview: setup + $(POETRY) run sphinx-autobuild -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml --port 5500 + .PHONY: multiversionpreview multiversionpreview: multiversion $(POETRY) run python -m http.server 5500 --directory $(BUILDDIR)/dirhtml +# Test commands .PHONY: test test: setup $(SPHINXBUILD) -b dirhtml $(TESTSPHINXOPTS) $(BUILDDIR)/dirhtml @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." \ No newline at end of file + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +.PHONY: linkcheck +linkcheck: setup + $(SPHINXBUILD) -b linkcheck $(SOURCEDIR) $(BUILDDIR)/linkcheck diff --git a/docs/conf.py b/docs/conf.py index ffceb0a3a4..904325202a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -10,9 +10,29 @@ # -- General configuration ----------------------------------------------------- +# Build documentation for the following tags and branches +TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla'] +BRANCHES = ['master'] +# Set the latest version. +LATEST_VERSION = '3.25.4-scylla' +# Set which versions are not released yet. +UNSTABLE_VERSIONS = ['master'] +# Set which versions are deprecated +DEPRECATED_VERSIONS = [''] + # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.githubpages', 'sphinx.ext.viewcode', 'sphinx_scylladb_theme', 'sphinx_multiversion', 'recommonmark'] +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.todo', + 'sphinx.ext.mathjax', + 'sphinx.ext.githubpages', + 'sphinx.ext.extlinks', + 'sphinx_sitemap', + 'sphinx_scylladb_theme', + 'sphinx_multiversion', # optional + 'recommonmark', # optional +] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -60,32 +80,35 @@ # -- Options for redirect extension -------------------------------------------- # Read a YAML dictionary of redirections and generate an HTML file for each -redirects_file = "_utils/redirections.yaml" +redirects_file = '_utils/redirections.yaml' # -- Options for multiversion -------------------------------------------------- -# Whitelist pattern for tags (set to None to ignore all tags) -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla'] + +# Whitelist pattern for tags smv_tag_whitelist = multiversion_regex_builder(TAGS) -# Whitelist pattern for branches (set to None to ignore all branches) -BRANCHES = ['master'] +# Whitelist pattern for branches smv_branch_whitelist = multiversion_regex_builder(BRANCHES) # Defines which version is considered to be the latest stable version. -# Must be listed in smv_tag_whitelist or smv_branch_whitelist. -smv_latest_version = '3.25.4-scylla' +smv_latest_version = LATEST_VERSION +# Defines the new name for the latest version. smv_rename_latest_version = 'stable' # Whitelist pattern for remotes (set to None to use local branches only) -smv_remote_whitelist = r"^origin$" +smv_remote_whitelist = r'^origin$' # Pattern for released versions smv_released_pattern = r'^tags/.*$' # Format for versioned output directories inside the build directory smv_outputdir_format = '{ref.name}' -# -- Options for HTML output --------------------------------------------------- +# -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_scylladb_theme' +# -- Options for sitemap extension --------------------------------------- + +sitemap_url_scheme = 'stable/{link}' + # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. @@ -95,6 +118,8 @@ 'github_issues_repository': 'scylladb/python-driver', 'hide_edit_this_page_button': 'false', 'hide_version_dropdown': ['master'], + 'versions_unstable': UNSTABLE_VERSIONS, + 'versions_deprecated': DEPRECATED_VERSIONS, } # Custom sidebar templates, maps document names to template names. 
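For orientation, a rough sketch of the whitelist pattern that a helper like `multiversion_regex_builder` presumably derives from these lists (an assumption for illustration only; the real helper ships with the ScyllaDB Sphinx theme package):

```python
import re

def build_whitelist_regex(refs):
    # Hypothetical stand-in for multiversion_regex_builder: match exactly one
    # of the given tag/branch names, so sphinx-multiversion builds only them.
    return r'^(' + '|'.join(re.escape(ref) for ref in refs) + r')$'

TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla']
smv_tag_whitelist = build_whitelist_regex(TAGS)
assert re.match(smv_tag_whitelist, '3.25.4-scylla')
```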
diff --git a/docs/pyproject.toml b/docs/pyproject.toml index 359b7950ed..82bd20386e 100644 --- a/docs/pyproject.toml +++ b/docs/pyproject.toml @@ -11,14 +11,14 @@ geomet = "0.1.2" gevent = "20.12.1" gremlinpython = "3.4.7" python = "^3.7" -pyyaml = "^6.0" +pyyaml = "6.0" pygments = "2.2.0" -recommonmark = "^0.7.1" -sphinx-autobuild = "^2021.3.14" +recommonmark = "0.7.1" +sphinx-autobuild = "2021.3.14" sphinx-sitemap = "2.1.0" -sphinx-scylladb-theme = "~1.1.0" -sphinx-multiversion-scylla = "~0.2.10" -Sphinx = "^4.3.2" +sphinx-scylladb-theme = "~1.2.1" +sphinx-multiversion-scylla = "~0.2.11" +Sphinx = "4.3.2" scales = "1.0.9" six = "1.15.0" From 9a645c58ca0ec57f775251f94e55c30aa837b2ad Mon Sep 17 00:00:00 2001 From: Emmanuel Arias Date: Tue, 17 May 2022 15:24:12 -0300 Subject: [PATCH 025/551] Merge pull request #1126 from eamanu/fix-typos Fix typos detected by Lintian during the packaging in Debian --- cassandra/cluster.py | 2 +- cassandra/util.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index cf78725f17..c836fb4302 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2391,7 +2391,7 @@ def default_consistency_level(self, cl): *Deprecated:* use execution profiles instead """ warn("Setting the consistency level at the session level will be removed in 4.0. Consider using " - "execution profiles and setting the desired consitency level to the EXEC_PROFILE_DEFAULT profile." + "execution profiles and setting the desired consistency level to the EXEC_PROFILE_DEFAULT profile." , DeprecationWarning) self._validate_set_legacy_config('default_consistency_level', cl) diff --git a/cassandra/util.py b/cassandra/util.py index f896ff4f86..dd5c58b01d 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -797,7 +797,7 @@ class OrderedMap(Mapping): ''' An ordered map that accepts non-hashable types for keys. It also maintains the insertion order of items, behaving as OrderedDict in that regard. These maps - are constructed and read just as normal mapping types, exept that they may + are constructed and read just as normal mapping types, except that they may contain arbitrary collections and other non-hashable items as keys:: >>> od = OrderedMap([({'one': 1, 'two': 2}, 'value'), From 94b64bb5571ed9c47d8cb7e8e19fcb6806cf1f2f Mon Sep 17 00:00:00 2001 From: Anna Stuchlik Date: Wed, 25 May 2022 15:20:18 +0200 Subject: [PATCH 026/551] update the project name in the documentation --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 904325202a..1e73959afc 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -47,7 +47,7 @@ master_doc = 'index' # General information about the project. 
-project = u'Cassandra Driver' +project = u'Scylla Python Driver' copyright = u'ScyllaDB 2021 and © DataStax 2013-2017' # The version info for the project you're documenting, acts as replacement for From a21df9750b341db7a93de31e724d589a67dde65b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Wed, 15 Jun 2022 18:59:03 +0200 Subject: [PATCH 027/551] cassandra/metadata.py: Add missing CQL reserved keywords used by Scylla Some Scylla-specific reserved CQL keywords were missing from cassandra/metadata.py: cast, scylla_clustering_bound, scylla_counter_shard_list, scylla_timeuuid_list_index --- cassandra/metadata.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index d70ba6dfb9..413663002c 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -49,16 +49,16 @@ cql_keywords = set(( 'add', 'aggregate', 'all', 'allow', 'alter', 'and', 'apply', 'as', 'asc', 'ascii', 'authorize', 'batch', 'begin', - 'bigint', 'blob', 'boolean', 'by', 'called', 'clustering', 'columnfamily', 'compact', 'contains', 'count', + 'bigint', 'blob', 'boolean', 'by', 'cast', 'called', 'clustering', 'columnfamily', 'compact', 'contains', 'count', 'counter', 'create', 'custom', 'date', 'decimal', 'default', 'delete', 'desc', 'describe', 'deterministic', 'distinct', 'double', 'drop', 'entries', 'execute', 'exists', 'filtering', 'finalfunc', 'float', 'from', 'frozen', 'full', 'function', 'functions', 'grant', 'if', 'in', 'index', 'inet', 'infinity', 'initcond', 'input', 'insert', 'int', 'into', 'is', 'json', 'key', 'keys', 'keyspace', 'keyspaces', 'language', 'limit', 'list', 'login', 'map', 'materialized', 'mbean', 'mbeans', 'modify', 'monotonic', 'nan', 'nologin', 'norecursive', 'nosuperuser', 'not', 'null', 'of', 'on', 'options', 'or', 'order', 'password', 'permission', - 'permissions', 'primary', 'rename', 'replace', 'returns', 'revoke', 'role', 'roles', 'schema', 'select', 'set', - 'sfunc', 'smallint', 'static', 'storage', 'stype', 'superuser', 'table', 'text', 'time', 'timestamp', 'timeuuid', - 'tinyint', 'to', 'token', 'trigger', 'truncate', 'ttl', 'tuple', 'type', 'unlogged', 'unset', 'update', 'use', 'user', - 'users', 'using', 'uuid', 'values', 'varchar', 'varint', 'view', 'where', 'with', 'writetime', + 'permissions', 'primary', 'rename', 'replace', 'returns', 'revoke', 'role', 'roles', 'schema', 'scylla_clustering_bound', + 'scylla_counter_shard_list', 'scylla_timeuuid_list_index', 'select', 'set', 'sfunc', 'smallint', 'static', 'storage', 'stype', 'superuser', + 'table', 'text', 'time', 'timestamp', 'timeuuid', 'tinyint', 'to', 'token', 'trigger', 'truncate', 'ttl', 'tuple', 'type', 'unlogged', + 'unset', 'update', 'use', 'user', 'users', 'using', 'uuid', 'values', 'varchar', 'varint', 'view', 'where', 'with', 'writetime', # DSE specifics "node", "nodes", "plan", "active", "application", "applications", "java", "executor", "executors", "std_out", "std_err", From 181be7bffd07661d7b77c42d17c6e0076ef17cb4 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Tue, 17 May 2022 12:28:39 +0100 Subject: [PATCH 028/551] docs: disable link checker --- .github/workflows/docs-links.yaml | 34 ------------------------------- .lycheeignore | 1 - 2 files changed, 35 deletions(-) delete mode 100644 .github/workflows/docs-links.yaml delete mode 100644 .lycheeignore diff --git a/.github/workflows/docs-links.yaml b/.github/workflows/docs-links.yaml deleted file mode 100644 index 966c95a7a8..0000000000 --- 
a/.github/workflows/docs-links.yaml +++ /dev/null @@ -1,34 +0,0 @@ -name: "Docs / Links" -# For more information, -# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows - -on: - workflow_dispatch: - schedule: - - cron: "0 0 * * 0" # At 00:00 on Sunday - -jobs: - linkChecker: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v3 - with: - persist-credentials: false - fetch-depth: 0 - - - name: Link Checker - id: lychee - uses: lycheeverse/lychee-action@v1.4.1 - with: - args: --verbose --no-progress './**/*.md' './**/*.rst' - env: - GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} - - - name: Create Issue From File - if: ${{ steps.lychee.outputs.exit_code != 0 }} - uses: peter-evans/create-issue-from-file@v4 - with: - title: Link Checker Report - content-filepath: ./lychee/out.md - labels: report, automated issue diff --git a/.lycheeignore b/.lycheeignore deleted file mode 100644 index dce392204c..0000000000 --- a/.lycheeignore +++ /dev/null @@ -1 +0,0 @@ -http://127.0.0.1 \ No newline at end of file From 8dc076429c6d0e7395c206f3a2e5cd1af10e0415 Mon Sep 17 00:00:00 2001 From: Alejo Sanchez Date: Fri, 24 Jun 2022 10:37:17 +0200 Subject: [PATCH 029/551] Handle port passed as string to Cluster If the port number is passed as a string, the driver works at first but starts failing later during address lookup. Check that a port passed as a string is a valid number and convert it to int. Signed-off-by: Alejo Sanchez --- cassandra/cluster.py | 7 +++++++ tests/unit/test_cluster.py | 10 ++++++++++ 2 files changed, 17 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index c81c7835a9..ed5dfbddf7 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1144,6 +1144,13 @@ def __init__(self, Any of the mutable Cluster attributes may be set as keyword arguments to the constructor. """ + + # Handle port passed as string + if isinstance(port, str): + if not port.isdigit(): + raise ValueError("Only numeric values are supported for port (%s)" % port) + port = int(port) + if connection_class is not None: self.connection_class = connection_class diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 2c9ebd3872..816492e72e 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -121,6 +121,16 @@ def test_requests_in_flight_threshold(self): for n in (0, mn, 128): self.assertRaises(ValueError, c.set_max_requests_per_connection, d, n) + def test_port_str(self): + """Check port passed as string is converted and checked properly""" + cluster = Cluster(contact_points=['127.0.0.1'], port='1111') + for cp in cluster.endpoints_resolved: + if cp.address in ('::1', '127.0.0.1'): + self.assertEqual(cp.port, 1111) + + with self.assertRaises(ValueError): + cluster = Cluster(contact_points=['127.0.0.1'], port='string') + class SchedulerTest(unittest.TestCase): # TODO: this suite could be expanded; for now just adding a test covering a ticket From 93573aec9411cae95a458970c0f126edd9d4fce2 Mon Sep 17 00:00:00 2001 From: Alejo Sanchez Date: Fri, 24 Jun 2022 11:45:03 +0200 Subject: [PATCH 030/551] Check port range Only allow valid TCP port numbers.
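Combined with the previous commit, the intended behavior is roughly the following sketch (illustrative only, not part of the diff; constructing a Cluster resolves contact points without opening connections, so this runs without a server):

```python
from cassandra.cluster import Cluster

# A numeric string is accepted and coerced to an int by the previous commit.
cluster = Cluster(contact_points=['127.0.0.1'], port='9042')
assert cluster.port == 9042

# Non-numeric strings and out-of-range numbers now raise ValueError up front
# instead of failing later during address resolution.
for bad_port in ('not-a-number', 0, 65536, -1):
    try:
        Cluster(contact_points=['127.0.0.1'], port=bad_port)
    except ValueError as exc:
        print(exc)
```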
Signed-off-by: Alejo Sanchez --- cassandra/cluster.py | 3 +++ tests/unit/test_cluster.py | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index ed5dfbddf7..8932bff58f 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1151,6 +1151,9 @@ def __init__(self, raise ValueError("Only numeric values are supported for port (%s)" % port) port = int(port) + if port < 1 or port > 65535: + raise ValueError("Invalid port number (%s) (1-65535)" % port) + if connection_class is not None: self.connection_class = connection_class diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 816492e72e..49529715a6 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -132,6 +132,12 @@ def test_port_str(self): cluster = Cluster(contact_points=['127.0.0.1'], port='string') + def test_port_range(self): + for invalid_port in [0, 65536, -1]: + with self.assertRaises(ValueError): + cluster = Cluster(contact_points=['127.0.0.1'], port=invalid_port) + + class SchedulerTest(unittest.TestCase): # TODO: this suite could be expanded; for now just adding a test covering a ticket From 5d529e10a0aa5beac65b62626294b425d532a5af Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Tue, 5 Jul 2022 15:35:06 +0300 Subject: [PATCH 031/551] Implement support of scylla cloud config bundle ```python path_to_bundle_yaml='/file/download/from/cloud/config.yaml' cluster= Cluster(scylla_cloud=path_to_bundle_yaml) ``` --- cassandra/cluster.py | 17 +++++ cassandra/connection.py | 7 +- cassandra/scylla/cloud.py | 142 ++++++++++++++++++++++++++++++++++++++ setup.py | 5 +- 4 files changed, 166 insertions(+), 5 deletions(-) create mode 100644 cassandra/scylla/cloud.py diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 6fa86feb6f..77ed6917be 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -91,6 +91,7 @@ GraphSON3Serializer) from cassandra.datastax.graph.query import _request_timeout_key, _GraphSONContextRowFactory from cassandra.datastax import cloud as dscloud +from cassandra.scylla.cloud import CloudConfiguration try: from cassandra.io.twistedreactor import TwistedConnection @@ -1137,6 +1138,7 @@ def __init__(self, monitor_reporting_interval=30, client_id=None, cloud=None, + scylla_cloud=None, shard_aware_options=None): """ ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as @@ -1157,6 +1159,21 @@ def __init__(self, if connection_class is not None: self.connection_class = connection_class + if scylla_cloud is not None: + if contact_points is not _NOT_SET or endpoint_factory or ssl_context or ssl_options: + raise ValueError("contact_points, endpoint_factory, ssl_context, and ssl_options " + "cannot be specified with a scylla cloud configuration") + + uses_twisted = TwistedConnection and issubclass(self.connection_class, TwistedConnection) + uses_eventlet = EventletConnection and issubclass(self.connection_class, EventletConnection) + + scylla_cloud_config = CloudConfiguration.create(scylla_cloud, pyopenssl=uses_twisted or uses_eventlet) + ssl_context = scylla_cloud_config.ssl_context + endpoint_factory = scylla_cloud_config.endpoint_factory + contact_points = scylla_cloud_config.contact_points + ssl_options = scylla_cloud_config.ssl_options + auth_provider = scylla_cloud_config.auth_provider + if cloud is not None: self.cloud = cloud if contact_points is not _NOT_SET or endpoint_factory or ssl_context or ssl_options: diff --git a/cassandra/connection.py 
b/cassandra/connection.py index adab22bd16..78d7743881 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -309,16 +309,17 @@ def __repr__(self): class SniEndPointFactory(EndPointFactory): - def __init__(self, proxy_address, port): + def __init__(self, proxy_address, port, node_domain=None): self._proxy_address = proxy_address self._port = port + self._node_domain = node_domain def create(self, row): host_id = row.get("host_id") if host_id is None: raise ValueError("No host_id to create the SniEndPoint") - - return SniEndPoint(self._proxy_address, str(host_id), self._port) + address = "{}.{}".format(host_id, self._node_domain) if self._node_domain else str(host_id) + return SniEndPoint(self._proxy_address, str(address), self._port) def create_from_sni(self, sni): return SniEndPoint(self._proxy_address, sni, self._port) diff --git a/cassandra/scylla/cloud.py b/cassandra/scylla/cloud.py new file mode 100644 index 0000000000..01b7dc9884 --- /dev/null +++ b/cassandra/scylla/cloud.py @@ -0,0 +1,142 @@ +# Copyright ScyllaDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import ssl +import tempfile +import base64 +from ssl import SSLContext +from contextlib import contextmanager +from itertools import islice + +import six +import yaml + +from cassandra.connection import SniEndPointFactory +from cassandra.auth import AuthProvider, PlainTextAuthProvider + + +@contextmanager +def file_or_memory(path=None, data=None): + # since we can't read keys/cert from memory yet + # see https://github.com/python/cpython/pull/2449 which isn't accepted and PEP-543 that was withdrawn + # so we use temporary file to load the key + if data: + with tempfile.NamedTemporaryFile(mode="wb") as f: + d = base64.decodebytes(bytes(data, encoding='utf-8')) + f.write(d) + if not d.endswith(b"\n"): + f.write(b"\n") + + f.flush() + yield f.name + + if path: + yield path + + +def nth(iterable, n, default=None): + "Returns the nth item or a default value" + return next(islice(iterable, n, None), default) + + +class CloudConfiguration: + endpoint_factory: SniEndPointFactory + contact_points: list + auth_provider: AuthProvider = None + ssl_options: dict + ssl_context: SSLContext + skip_tls_verify: bool + + def __init__(self, configuration_file, pyopenssl=False): + cloud_config = yaml.safe_load(open(configuration_file)) + + self.current_context = cloud_config['contexts'][cloud_config['currentContext']] + self.data_centers = cloud_config['datacenters'] + self.auth_info = cloud_config['authInfos'][self.current_context['authInfoName']] + self.ssl_options = {} + self.skip_tls_verify = self.auth_info.get('insecureSkipTLSVerify', False) + self.ssl_context = self.create_pyopenssl_context() if pyopenssl else self.create_ssl_context() + + proxy_address, port, node_domain = self.get_server(self.data_centers[self.current_context['datacenterName']], + keys_order=['testServer', 'server']) + self.endpoint_factory = SniEndPointFactory(proxy_address, port=int(port), node_domain=node_domain) + + username, password = 
self.auth_info.get('username'), self.auth_info.get('password') + if username and password: + self.auth_provider = PlainTextAuthProvider(username, password) + + + @property + def contact_points(self): + _contact_points = [] + for data_center in self.data_centers.values(): + address, _, _ = self.get_server(data_center) + _contact_points.append(self.endpoint_factory.create_from_sni(address)) + return _contact_points + + def get_server(self, data_center, keys_order=None): + keys_order = keys_order or ['server'] + for key in keys_order: + address = data_center.get(key, '') + if not address: + continue + address = address.split(":") + port = nth(address, 1, default=443) + address = nth(address, 0) + node_domain = data_center.get('nodeDomain') + return address, port, node_domain + + def create_ssl_context(self): + ssl_context = ssl.SSLContext(protocol=ssl.PROTOCOL_SSLv23) + ssl_context.verify_mode = ssl.VerifyMode.CERT_NONE if self.skip_tls_verify else ssl.VerifyMode.CERT_REQUIRED + for data_center in self.data_centers.values(): + with file_or_memory(path=data_center.get('certificateAuthorityPath'), + data=data_center.get('certificateAuthorityData')) as cafile: + ssl_context.load_verify_locations(cadata=open(cafile).read()) + with file_or_memory(path=self.auth_info.get('clientCertificatePath'), + data=self.auth_info.get('clientCertificateData')) as certfile, \ + file_or_memory(path=self.auth_info.get('clientKeyPath'), data=self.auth_info.get('clientKeyData')) as keyfile: + ssl_context.load_cert_chain(keyfile=keyfile, + certfile=certfile) + + return ssl_context + + def create_pyopenssl_context(self): + try: + from OpenSSL import SSL + except ImportError as e: + six.reraise( + ImportError, + ImportError( + "PyOpenSSL must be installed to connect to scylla-cloud with the Eventlet or Twisted event loops"), + sys.exc_info()[2] + ) + ssl_context = SSL.Context(SSL.TLS_METHOD) + ssl_context.set_verify(SSL.VERIFY_PEER, callback=lambda _1, _2, _3, _4, ok: True if self.skip_tls_verify else ok) + for data_center in self.data_centers.values(): + with file_or_memory(path=data_center.get('certificateAuthorityPath'), + data=data_center.get('certificateAuthorityData')) as cafile: + ssl_context.load_verify_locations(cafile) + with file_or_memory(path=self.auth_info.get('clientCertificatePath'), + data=self.auth_info.get('clientCertificateData')) as certfile, \ + file_or_memory(path=self.auth_info.get('clientKeyPath'), data=self.auth_info.get('clientKeyData')) as keyfile: + ssl_context.use_privatekey_file(keyfile) + ssl_context.use_certificate_file(certfile) + + return ssl_context + + @classmethod + def create(cls, configuration_file, pyopenssl=False): + return cls(configuration_file, pyopenssl) diff --git a/setup.py b/setup.py index 364759386a..dda2067fb1 100644 --- a/setup.py +++ b/setup.py @@ -404,7 +404,8 @@ def run_setup(extensions): sys.stderr.write("Bypassing Cython setup requirement\n") dependencies = ['six >=1.9', - 'geomet>=0.1,<0.3'] + 'geomet>=0.1,<0.3', + 'pyyaml > 5.0'] if not PY3: dependencies.append('futures') @@ -429,7 +430,7 @@ def run_setup(extensions): packages=[ 'cassandra', 'cassandra.io', 'cassandra.cqlengine', 'cassandra.graph', 'cassandra.datastax', 'cassandra.datastax.insights', 'cassandra.datastax.graph', - 'cassandra.datastax.graph.fluent', 'cassandra.datastax.cloud' + 'cassandra.datastax.graph.fluent', 'cassandra.datastax.cloud', 'cassandra.scylla' ], keywords='cassandra,cql,orm,dse,graph', include_package_data=True, From 6de917f9bd49f6d0a934b0de2fe5a2538964db9f Mon Sep 17 00:00:00 2001 
From: Israel Fruchter Date: Tue, 5 Jul 2022 15:38:10 +0300 Subject: [PATCH 032/551] test_scylla_cloud: add new tests for using cloud config bundle * these tests use the CCM sni_proxy code --- .github/workflows/integration-tests.yml | 2 +- cassandra/cluster.py | 7 +- cassandra/scylla/cloud.py | 35 ++++---- .../integration/standard/test_scylla_cloud.py | 82 +++++++++++++++++++ 4 files changed, 105 insertions(+), 21 deletions(-) create mode 100644 tests/integration/standard/test_scylla_cloud.py diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 8e1d292be8..cc3b1edef2 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -19,5 +19,5 @@ jobs: - name: Test with pytest run: | - ./ci/run_integration_test.sh tests/integration/standard/test_authentication.py tests/integration/standard/test_cluster.py tests/integration/standard/test_concurrent.py tests/integration/standard/test_connection.py tests/integration/standard/test_control_connection.py tests/integration/standard/test_custom_payload.py tests/integration/standard/test_custom_protocol_handler.py tests/integration/standard/test_cython_protocol_handlers.py + ./ci/run_integration_test.sh tests/integration/standard/test_authentication.py tests/integration/standard/test_cluster.py tests/integration/standard/test_concurrent.py tests/integration/standard/test_connection.py tests/integration/standard/test_control_connection.py tests/integration/standard/test_custom_payload.py tests/integration/standard/test_custom_protocol_handler.py tests/integration/standard/test_cython_protocol_handlers.py tests/integration/standard/test_scylla_cloud.py # can't run this, cause only 2 cpus on github actions: tests/integration/standard/test_shard_aware.py diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 77ed6917be..5f90195d92 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1160,14 +1160,15 @@ def __init__(self, self.connection_class = connection_class if scylla_cloud is not None: - if contact_points is not _NOT_SET or endpoint_factory or ssl_context or ssl_options: - raise ValueError("contact_points, endpoint_factory, ssl_context, and ssl_options " + if contact_points is not _NOT_SET or ssl_context or ssl_options: + raise ValueError("contact_points, ssl_context, and ssl_options " "cannot be specified with a scylla cloud configuration") uses_twisted = TwistedConnection and issubclass(self.connection_class, TwistedConnection) uses_eventlet = EventletConnection and issubclass(self.connection_class, EventletConnection) - scylla_cloud_config = CloudConfiguration.create(scylla_cloud, pyopenssl=uses_twisted or uses_eventlet) + scylla_cloud_config = CloudConfiguration.create(scylla_cloud, pyopenssl=uses_twisted or uses_eventlet, + endpoint_factory=endpoint_factory) ssl_context = scylla_cloud_config.ssl_context endpoint_factory = scylla_cloud_config.endpoint_factory contact_points = scylla_cloud_config.contact_points diff --git a/cassandra/scylla/cloud.py b/cassandra/scylla/cloud.py index 01b7dc9884..5a4fe782ea 100644 --- a/cassandra/scylla/cloud.py +++ b/cassandra/scylla/cloud.py @@ -59,7 +59,7 @@ class CloudConfiguration: ssl_context: SSLContext skip_tls_verify: bool - def __init__(self, configuration_file, pyopenssl=False): + def __init__(self, configuration_file, pyopenssl=False, endpoint_factory=None): cloud_config = yaml.safe_load(open(configuration_file)) self.current_context = cloud_config['contexts'][cloud_config['currentContext']] @@ -69,9 
+69,13 @@ def __init__(self, configuration_file, pyopenssl=False): self.skip_tls_verify = self.auth_info.get('insecureSkipTLSVerify', False) self.ssl_context = self.create_pyopenssl_context() if pyopenssl else self.create_ssl_context() - proxy_address, port, node_domain = self.get_server(self.data_centers[self.current_context['datacenterName']], - keys_order=['testServer', 'server']) - self.endpoint_factory = SniEndPointFactory(proxy_address, port=int(port), node_domain=node_domain) + proxy_address, port, node_domain = self.get_server(self.data_centers[self.current_context['datacenterName']]) + + if not endpoint_factory: + endpoint_factory = SniEndPointFactory(proxy_address, port=int(port), node_domain=node_domain) + else: + assert isinstance(endpoint_factory, SniEndPointFactory) + self.endpoint_factory = endpoint_factory username, password = self.auth_info.get('username'), self.auth_info.get('password') if username and password: @@ -86,17 +90,14 @@ def contact_points(self): _contact_points.append(self.endpoint_factory.create_from_sni(address)) return _contact_points - def get_server(self, data_center, keys_order=None): - keys_order = keys_order or ['server'] - for key in keys_order: - address = data_center.get(key, '') - if not address: - continue - address = address.split(":") - port = nth(address, 1, default=443) - address = nth(address, 0) - node_domain = data_center.get('nodeDomain') - return address, port, node_domain + def get_server(self, data_center): + address = data_center.get('server') + address = address.split(":") + port = nth(address, 1, default=443) + address = nth(address, 0) + node_domain = data_center.get('nodeDomain') + assert address and port and node_domain, "server or nodeDomain are missing" + return address, port, node_domain def create_ssl_context(self): ssl_context = ssl.SSLContext(protocol=ssl.PROTOCOL_SSLv23) @@ -138,5 +139,5 @@ def create_pyopenssl_context(self): return ssl_context @classmethod - def create(cls, configuration_file, pyopenssl=False): - return cls(configuration_file, pyopenssl) + def create(cls, configuration_file, pyopenssl=False, endpoint_factory=None): + return cls(configuration_file, pyopenssl=pyopenssl, endpoint_factory=endpoint_factory) diff --git a/tests/integration/standard/test_scylla_cloud.py b/tests/integration/standard/test_scylla_cloud.py new file mode 100644 index 0000000000..c5fe9ce346 --- /dev/null +++ b/tests/integration/standard/test_scylla_cloud.py @@ -0,0 +1,82 @@ +import os.path +from unittest import TestCase +from ccmlib.utils.ssl_utils import generate_ssl_stores +from ccmlib.utils.sni_proxy import refresh_certs, get_cluster_info, start_sni_proxy, create_cloud_config + +from tests.integration import use_cluster +from cassandra.cluster import Cluster, TwistedConnection +from cassandra.connection import SniEndPointFactory +from cassandra.io.asyncorereactor import AsyncoreConnection +from cassandra.io.libevreactor import LibevConnection +from cassandra.io.geventreactor import GeventConnection +from cassandra.io.eventletreactor import EventletConnection +from cassandra.io.asyncioreactor import AsyncioConnection + +supported_connection_classes = [AsyncoreConnection, LibevConnection, TwistedConnection] +# need to run them with specific configuration like `gevent.monkey.patch_all()` or under async functions +unsupported_connection_classes = [GeventConnection, AsyncioConnection, EventletConnection] + + +class ScyllaCloudConfigTests(TestCase): + def start_cluster_with_proxy(self): + ccm_cluster = self.ccm_cluster + 
+        generate_ssl_stores(ccm_cluster.get_path())
+        ssl_port = 9142
+        sni_port = 443
+        ccm_cluster.set_configuration_options(dict(
+            client_encryption_options=
+            dict(require_client_auth=True,
+                 truststore=os.path.join(ccm_cluster.get_path(), 'ccm_node.cer'),
+                 certificate=os.path.join(ccm_cluster.get_path(), 'ccm_node.pem'),
+                 keyfile=os.path.join(ccm_cluster.get_path(), 'ccm_node.key'),
+                 enabled=True),
+            native_transport_port_ssl=ssl_port))
+
+        ccm_cluster._update_config()
+
+        ccm_cluster.start(wait_for_binary_proto=True)
+
+        nodes_info = get_cluster_info(ccm_cluster, port=ssl_port)
+        refresh_certs(ccm_cluster, nodes_info)
+
+        docker_id, listen_address, listen_port = \
+            start_sni_proxy(ccm_cluster.get_path(), nodes_info=nodes_info, listen_port=sni_port)
+        ccm_cluster.sni_proxy_docker_id = docker_id
+        ccm_cluster.sni_proxy_listen_port = listen_port
+        ccm_cluster._update_config()
+
+        config_data_yaml, config_path_yaml = create_cloud_config(ccm_cluster.get_path(), listen_port)
+
+        endpoint_factory = SniEndPointFactory(listen_address, port=int(listen_port),
+                                              node_domain="cluster-id.scylla.com")
+
+        return config_data_yaml, config_path_yaml, endpoint_factory
+
+    def test_1_node_cluster(self):
+        self.ccm_cluster = use_cluster("sni_proxy", [1], start=False)
+        config_data_yaml, config_path_yaml, endpoint_factory = self.start_cluster_with_proxy()
+
+        for config in [config_path_yaml, config_data_yaml]:
+            for connection_class in supported_connection_classes:
+                cluster = Cluster(scylla_cloud=config, connection_class=connection_class,
+                                  endpoint_factory=endpoint_factory)
+                with cluster.connect() as session:
+                    res = session.execute("SELECT * FROM system.local")
+                    assert res.all()
+
+                assert len(cluster.metadata._hosts) == 1
+                assert len(cluster.metadata._host_id_by_endpoint) == 1
+
+    def test_3_node_cluster(self):
+        self.ccm_cluster = use_cluster("sni_proxy", [3], start=False)
+        config_data_yaml, config_path_yaml, endpoint_factory = self.start_cluster_with_proxy()
+
+        for config in [config_path_yaml, config_data_yaml]:
+            for connection_class in supported_connection_classes:
+                cluster = Cluster(scylla_cloud=config, connection_class=connection_class,
+                                  endpoint_factory=endpoint_factory)
+                with cluster.connect() as session:
+                    res = session.execute("SELECT * FROM system.local")
+                    assert res.all()
+                assert len(cluster.metadata._hosts) == 3
+                assert len(cluster.metadata._host_id_by_endpoint) == 3

From 8b78f068c63c2a85fc3a0b081a516969cdde73ff Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Wed, 6 Jul 2022 15:49:40 +0300
Subject: [PATCH 033/551] metadata: save hosts based on host_id instead of
 endpoint

Also keep a mapping from endpoints to host_ids, so the control
connection can keep working with the initial endpoint, while the hosts
list only knows nodes by their host_id.
---
 cassandra/cluster.py  | 29 +++++++++++++++++------------
 cassandra/metadata.py | 31 +++++++++++++++++++++++++------
 cassandra/pool.py     |  3 +++
 3 files changed, 45 insertions(+), 18 deletions(-)

diff --git a/cassandra/cluster.py b/cassandra/cluster.py
index 5f90195d92..df3f69190d 100644
--- a/cassandra/cluster.py
+++ b/cassandra/cluster.py
@@ -2130,7 +2130,7 @@ def signal_connection_failure(self, host, connection_exc, is_host_addition, expe
         self.on_down(host, is_host_addition, expect_host_to_be_down)
         return is_down
 
-    def add_host(self, endpoint, datacenter=None, rack=None, signal=True, refresh_nodes=True):
+    def add_host(self, endpoint, datacenter=None, rack=None, signal=True, refresh_nodes=True, host_id=None):
         """
         Called when adding
initial contact points and when the control connection subsequently discovers a new node. @@ -2138,7 +2138,7 @@ def add_host(self, endpoint, datacenter=None, rack=None, signal=True, refresh_no the metadata. Intended for internal use only. """ - host, new = self.metadata.add_or_return_host(Host(endpoint, self.conviction_policy_factory, datacenter, rack)) + host, new = self.metadata.add_or_return_host(Host(endpoint, self.conviction_policy_factory, datacenter, rack, host_id=host_id)) if new and signal: log.info("New Cassandra host %r discovered", host) self.on_add(host, refresh_nodes) @@ -3817,9 +3817,8 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, partitioner = None token_map = {} - found_hosts = set() + found_host_ids = set() if local_result.parsed_rows: - found_hosts.add(connection.endpoint) local_rows = dict_factory(local_result.column_names, local_result.parsed_rows) local_row = local_rows[0] cluster_name = local_row["cluster_name"] @@ -3833,7 +3832,9 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, datacenter = local_row.get("data_center") rack = local_row.get("rack") self._update_location_info(host, datacenter, rack) + host.endpoint = self._cluster.endpoint_factory.create(local_row) host.host_id = local_row.get("host_id") + found_host_ids.add(host.host_id) host.listen_address = local_row.get("listen_address") host.listen_port = local_row.get("listen_port") host.broadcast_address = _NodeInfo.get_broadcast_address(local_row) @@ -3872,6 +3873,8 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, if partitioner and tokens: token_map[host] = tokens + self._cluster.metadata.update_host(host, old_endpoint=connection.endpoint) + connection.original_endpoint = connection.endpoint = host.endpoint # Check metadata.partitioner to see if we haven't built anything yet. If # every node in the cluster was in the contact points, we won't discover # any new nodes, so we need this additional check. (See PYTHON-90) @@ -3884,24 +3887,26 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, continue endpoint = self._cluster.endpoint_factory.create(row) + host_id = row.get("host_id") - if endpoint in found_hosts: - log.warning("Found multiple hosts with the same endpoint (%s). Excluding peer %s", endpoint, row.get("peer")) + if host_id in found_host_ids: + log.warning("Found multiple hosts with the same host_id (%s). 
Excluding peer %s", host_id, row.get("peer")) continue - found_hosts.add(endpoint) + found_host_ids.add(host_id) host = self._cluster.metadata.get_host(endpoint) datacenter = row.get("data_center") rack = row.get("rack") + if host is None: log.debug("[control connection] Found new host to connect to: %s", endpoint) - host, _ = self._cluster.add_host(endpoint, datacenter, rack, signal=True, refresh_nodes=False) + host, _ = self._cluster.add_host(endpoint, datacenter=datacenter, rack=rack, signal=True, refresh_nodes=False, host_id=host_id) should_rebuild_token_map = True else: should_rebuild_token_map |= self._update_location_info(host, datacenter, rack) - host.host_id = row.get("host_id") + host.host_id = host_id host.broadcast_address = _NodeInfo.get_broadcast_address(row) host.broadcast_port = _NodeInfo.get_broadcast_port(row) host.broadcast_rpc_address = _NodeInfo.get_broadcast_rpc_address(row) @@ -3915,11 +3920,11 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, if partitioner and tokens and self._token_meta_enabled: token_map[host] = tokens - for old_host in self._cluster.metadata.all_hosts(): - if old_host.endpoint.address != connection.endpoint and old_host.endpoint not in found_hosts: + for old_host_id, old_host in self._cluster.metadata.all_hosts_items(): + if old_host_id not in found_host_ids: should_rebuild_token_map = True log.debug("[control connection] Removing host not found in peers metadata: %r", old_host) - self._cluster.remove_host(old_host) + self._cluster.metadata.remove_host_by_host_id(old_host_id) log.debug("[control connection] Finished fetching ring info") if partitioner and should_rebuild_token_map: diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 413663002c..ce0ed63bd2 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -124,6 +124,7 @@ def __init__(self): self.keyspaces = {} self.dbaas = False self._hosts = {} + self._host_id_by_endpoint = {} self._hosts_lock = RLock() def export_schema_as_string(self): @@ -330,14 +331,26 @@ def add_or_return_host(self, host): """ with self._hosts_lock: try: - return self._hosts[host.endpoint], False + return self._hosts[host.host_id], False except KeyError: - self._hosts[host.endpoint] = host + self._host_id_by_endpoint[host.endpoint] = host.host_id + self._hosts[host.host_id] = host return host, True def remove_host(self, host): with self._hosts_lock: - return bool(self._hosts.pop(host.endpoint, False)) + self._host_id_by_endpoint.pop(host.endpoint, False) + return bool(self._hosts.pop(host.host_id, False)) + + def remove_host_by_host_id(self, host_id): + with self._hosts_lock: + return bool(self._hosts.pop(host_id, False)) + + def update_host(self, host, old_endpoint): + host, created = self.add_or_return_host(host) + with self._hosts_lock: + self._host_id_by_endpoint.pop(old_endpoint, False) + self._host_id_by_endpoint[host.endpoint] = host.host_id def get_host(self, endpoint_or_address, port=None): """ @@ -345,10 +358,12 @@ def get_host(self, endpoint_or_address, port=None): iterate all hosts to match the :attr:`~.pool.Host.broadcast_rpc_address` and :attr:`~.pool.Host.broadcast_rpc_port` attributes. 
""" - if not isinstance(endpoint_or_address, EndPoint): - return self._get_host_by_address(endpoint_or_address, port) + with self._hosts_lock: + if not isinstance(endpoint_or_address, EndPoint): + return self._get_host_by_address(endpoint_or_address, port) - return self._hosts.get(endpoint_or_address) + host_id = self._host_id_by_endpoint.get(endpoint_or_address) + return self._hosts.get(host_id) def _get_host_by_address(self, address, port=None): for host in six.itervalues(self._hosts): @@ -365,6 +380,10 @@ def all_hosts(self): with self._hosts_lock: return list(self._hosts.values()) + def all_hosts_items(self): + with self._hosts_lock: + return list(self._hosts.items()) + REPLICATION_STRATEGY_CLASS_PREFIX = "org.apache.cassandra.locator." diff --git a/cassandra/pool.py b/cassandra/pool.py index b864d32ea4..f90802ea36 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -22,6 +22,7 @@ import time import random import copy +import uuid from threading import Lock, RLock, Condition import weakref try: @@ -174,6 +175,8 @@ def __init__(self, endpoint, conviction_policy_factory, datacenter=None, rack=No self.endpoint = endpoint if isinstance(endpoint, EndPoint) else DefaultEndPoint(endpoint) self.conviction_policy = conviction_policy_factory(self) + if not host_id: + host_id = uuid.uuid4() self.host_id = host_id self.set_location_info(datacenter, rack) self.lock = RLock() From c89784f42a67efae23383b2ea5e928673bdb04b6 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Sun, 10 Jul 2022 18:41:46 +0300 Subject: [PATCH 034/551] fix unittest to match the logic change in metadata --- cassandra/cluster.py | 7 +- tests/unit/test_control_connection.py | 92 +++++++++++++++++---------- 2 files changed, 64 insertions(+), 35 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index df3f69190d..587181ed15 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3498,7 +3498,7 @@ class ControlConnection(object): _SELECT_PEERS = "SELECT * FROM system.peers" _SELECT_PEERS_NO_TOKENS_TEMPLATE = "SELECT host_id, peer, data_center, rack, rpc_address, {nt_col_name}, release_version, schema_version FROM system.peers" _SELECT_LOCAL = "SELECT * FROM system.local WHERE key='local'" - _SELECT_LOCAL_NO_TOKENS = "SELECT host_id, cluster_name, data_center, rack, partitioner, release_version, schema_version FROM system.local WHERE key='local'" + _SELECT_LOCAL_NO_TOKENS = "SELECT host_id, cluster_name, data_center, rack, partitioner, release_version, schema_version, rpc_address FROM system.local WHERE key='local'" # Used only when token_metadata_enabled is set to False _SELECT_LOCAL_NO_TOKENS_RPC_ADDRESS = "SELECT rpc_address FROM system.local WHERE key='local'" @@ -3832,7 +3832,9 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, datacenter = local_row.get("data_center") rack = local_row.get("rack") self._update_location_info(host, datacenter, rack) - host.endpoint = self._cluster.endpoint_factory.create(local_row) + new_endpoint = self._cluster.endpoint_factory.create(local_row) + if new_endpoint.address: + host.endpoint = new_endpoint host.host_id = local_row.get("host_id") found_host_ids.add(host.host_id) host.listen_address = local_row.get("listen_address") @@ -3919,7 +3921,6 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, tokens = row.get("tokens", None) if partitioner and tokens and self._token_meta_enabled: token_map[host] = tokens - for old_host_id, old_host in self._cluster.metadata.all_hosts_items(): if old_host_id not 
in found_host_ids: should_rebuild_token_map = True diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index 84a08300a9..f9d2e27c89 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -34,9 +34,14 @@ class MockMetadata(object): def __init__(self): self.hosts = { - DefaultEndPoint("192.168.1.0"): Host(DefaultEndPoint("192.168.1.0"), SimpleConvictionPolicy), - DefaultEndPoint("192.168.1.1"): Host(DefaultEndPoint("192.168.1.1"), SimpleConvictionPolicy), - DefaultEndPoint("192.168.1.2"): Host(DefaultEndPoint("192.168.1.2"), SimpleConvictionPolicy) + 'uuid1': Host(endpoint=DefaultEndPoint("192.168.1.0"), conviction_policy_factory=SimpleConvictionPolicy, host_id='uuid1'), + 'uuid2': Host(endpoint=DefaultEndPoint("192.168.1.1"), conviction_policy_factory=SimpleConvictionPolicy, host_id='uuid2'), + 'uuid3': Host(endpoint=DefaultEndPoint("192.168.1.2"), conviction_policy_factory=SimpleConvictionPolicy, host_id='uuid3') + } + self._host_id_by_endpoint = { + DefaultEndPoint("192.168.1.0"): 'uuid1', + DefaultEndPoint("192.168.1.1"): 'uuid2', + DefaultEndPoint("192.168.1.2"): 'uuid3', } for host in self.hosts.values(): host.set_up() @@ -45,6 +50,7 @@ def __init__(self): self.cluster_name = None self.partitioner = None self.token_map = {} + self.removed_hosts = [] def get_host(self, endpoint_or_address, port=None): if not isinstance(endpoint_or_address, EndPoint): @@ -53,7 +59,8 @@ def get_host(self, endpoint_or_address, port=None): (port is None or host.broadcast_rpc_port is None or host.broadcast_rpc_port == port)): return host else: - return self.hosts.get(endpoint_or_address) + host_id = self._host_id_by_endpoint.get(endpoint_or_address) + return self.hosts.get(host_id) def all_hosts(self): return self.hosts.values() @@ -62,6 +69,26 @@ def rebuild_token_map(self, partitioner, token_map): self.partitioner = partitioner self.token_map = token_map + def add_or_return_host(self, host): + try: + return self.hosts[host.host_id], False + except KeyError: + self._host_id_by_endpoint[host.endpoint] = host.host_id + self.hosts[host.host_id] = host + return host, True + + def update_host(self, host, old_endpoint): + host, created = self.add_or_return_host(host) + self._host_id_by_endpoint[host.endpoint] = host.host_id + self._host_id_by_endpoint.pop(old_endpoint, False) + + def all_hosts_items(self): + return list(self.hosts.items()) + + def remove_host_by_host_id(self, host_id): + self.removed_hosts.append(self.hosts.pop(host_id, False)) + return bool(self.hosts.pop(host_id, False)) + class MockCluster(object): @@ -76,20 +103,20 @@ class MockCluster(object): def __init__(self): self.metadata = MockMetadata() self.added_hosts = [] - self.removed_hosts = [] self.scheduler = Mock(spec=_Scheduler) self.executor = Mock(spec=ThreadPoolExecutor) self.profile_manager.profiles[EXEC_PROFILE_DEFAULT] = ExecutionProfile(RoundRobinPolicy()) self.endpoint_factory = DefaultEndPointFactory().configure(self) self.ssl_options = None - def add_host(self, endpoint, datacenter, rack, signal=False, refresh_nodes=True): - host = Host(endpoint, SimpleConvictionPolicy, datacenter, rack) + def add_host(self, endpoint, datacenter, rack, signal=False, refresh_nodes=True, host_id=None): + host = Host(endpoint, SimpleConvictionPolicy, datacenter, rack, host_id=host_id) + host, _ = self.metadata.add_or_return_host(host) self.added_hosts.append(host) return host, True def remove_host(self, host): - self.removed_hosts.append(host) + pass def on_up(self, 
host): pass @@ -121,20 +148,20 @@ def __init__(self): self.endpoint = DefaultEndPoint("192.168.1.0") self.original_endpoint = self.endpoint self.local_results = [ - ["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens"], - [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"]]] + ["rpc_address", "schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens", "host_id"], + [["192.168.1.0", "a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"], "uuid1"]] ] self.peer_results = [ ["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens", "host_id"], - [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"], "uuid1"], - ["192.168.1.2", "10.0.0.2", "a", "dc1", "rack1", ["2", "102", "202"], "uuid2"]] + [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"], "uuid2"], + ["192.168.1.2", "10.0.0.2", "a", "dc1", "rack1", ["2", "102", "202"], "uuid3"]] ] self.peer_results_v2 = [ ["native_address", "native_port", "peer", "peer_port", "schema_version", "data_center", "rack", "tokens", "host_id"], - [["192.168.1.1", 9042, "10.0.0.1", 7042, "a", "dc1", "rack1", ["1", "101", "201"], "uuid1"], - ["192.168.1.2", 9042, "10.0.0.2", 7040, "a", "dc1", "rack1", ["2", "102", "202"], "uuid2"]] + [["192.168.1.1", 9042, "10.0.0.1", 7042, "a", "dc1", "rack1", ["1", "101", "201"], "uuid2"], + ["192.168.1.2", 9042, "10.0.0.2", 7040, "a", "dc1", "rack1", ["2", "102", "202"], "uuid3"]] ] self.wait_for_responses = Mock(return_value=_node_meta_results(self.local_results, self.peer_results)) @@ -154,15 +181,15 @@ def sleep(self, amount): class ControlConnectionTest(unittest.TestCase): _matching_schema_preloaded_results = _node_meta_results( - local_results=(["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens", "host_id"], - [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"], "uuid1"]]), + local_results=(["rpc_address", "schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens", "host_id"], + [["192.168.1.0", "a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"], "uuid1"]]), peer_results=(["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens", "host_id"], [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"], "uuid2"], ["192.168.1.2", "10.0.0.2", "a", "dc1", "rack1", ["2", "102", "202"], "uuid3"]])) _nonmatching_schema_preloaded_results = _node_meta_results( - local_results=(["schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens", "host_id"], - [["a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"], "uuid1"]]), + local_results=(["rpc_address", "schema_version", "cluster_name", "data_center", "rack", "partitioner", "release_version", "tokens", "host_id"], + [["192.168.1.0", "a", "foocluster", "dc1", "rack1", "Murmur3Partitioner", "2.2.0", ["0", "100", "200"], "uuid1"]]), peer_results=(["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens", "host_id"], [["192.168.1.1", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"], "uuid2"], ["192.168.1.2", "10.0.0.2", "b", "dc1", "rack1", ["2", "102", "202"], "uuid3"]])) @@ -240,10 +267,11 @@ def test_wait_for_schema_agreement_rpc_lookup(self): If the rpc_address is 0.0.0.0, the "peer" column should be used 
instead. """ self.connection.peer_results[1].append( - ["0.0.0.0", PEER_IP, "b", "dc1", "rack1", ["3", "103", "203"]] + ["0.0.0.0", PEER_IP, "b", "dc1", "rack1", ["3", "103", "203"], "uuid6"] ) - host = Host(DefaultEndPoint("0.0.0.0"), SimpleConvictionPolicy) - self.cluster.metadata.hosts[DefaultEndPoint("foobar")] = host + host = Host(DefaultEndPoint("0.0.0.0"), SimpleConvictionPolicy, host_id='uuid6') + self.cluster.metadata.hosts[host.host_id] = host + self.cluster.metadata._host_id_by_endpoint[DefaultEndPoint(PEER_IP)] = host.host_id host.is_up = False # even though the new host has a different schema version, it's @@ -285,7 +313,7 @@ def refresh_and_validate_added_hosts(): del self.connection.peer_results[:] self.connection.peer_results.extend([ ["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens", "host_id"], - [["192.168.1.3", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"], 'uuid5'], + [["192.168.1.3", "10.0.0.1", "a", "dc1", "rack1", ["1", "101", "201"], 'uuid6'], # all others are invalid [None, None, "a", "dc1", "rack1", ["1", "101", "201"], 'uuid1'], ["192.168.1.7", "10.0.0.1", "a", None, "rack1", ["1", "101", "201"], 'uuid2'], @@ -299,7 +327,7 @@ def refresh_and_validate_added_hosts(): del self.connection.peer_results[:] self.connection.peer_results.extend([ ["native_address", "native_port", "peer", "peer_port", "schema_version", "data_center", "rack", "tokens", "host_id"], - [["192.168.1.4", 9042, "10.0.0.1", 7042, "a", "dc1", "rack1", ["1", "101", "201"], "uuid1"], + [["192.168.1.4", 9042, "10.0.0.1", 7042, "a", "dc1", "rack1", ["1", "101", "201"], "uuid6"], # all others are invalid [None, 9042, None, 7040, "a", "dc1", "rack1", ["2", "102", "202"], "uuid2"], ["192.168.1.5", 9042, "10.0.0.2", 7040, "a", None, "rack1", ["2", "102", "202"], "uuid2"], @@ -336,7 +364,7 @@ def test_refresh_nodes_and_tokens_no_partitioner(self): Test handling of an unknown partitioner. 
""" # set the partitioner column to None - self.connection.local_results[1][0][4] = None + self.connection.local_results[1][0][5] = None self.control_connection.refresh_node_list_and_token_map() meta = self.cluster.metadata self.assertEqual(meta.partitioner, None) @@ -344,7 +372,7 @@ def test_refresh_nodes_and_tokens_no_partitioner(self): def test_refresh_nodes_and_tokens_add_host(self): self.connection.peer_results[1].append( - ["192.168.1.3", "10.0.0.3", "a", "dc1", "rack1", ["3", "103", "203"], "uuid3"] + ["192.168.1.3", "10.0.0.3", "a", "dc1", "rack1", ["3", "103", "203"], "uuid4"] ) self.cluster.scheduler.schedule = lambda delay, f, *args, **kwargs: f(*args, **kwargs) self.control_connection.refresh_node_list_and_token_map() @@ -352,13 +380,13 @@ def test_refresh_nodes_and_tokens_add_host(self): self.assertEqual(self.cluster.added_hosts[0].address, "192.168.1.3") self.assertEqual(self.cluster.added_hosts[0].datacenter, "dc1") self.assertEqual(self.cluster.added_hosts[0].rack, "rack1") - self.assertEqual(self.cluster.added_hosts[0].host_id, "uuid3") + self.assertEqual(self.cluster.added_hosts[0].host_id, "uuid4") def test_refresh_nodes_and_tokens_remove_host(self): del self.connection.peer_results[1][1] self.control_connection.refresh_node_list_and_token_map() - self.assertEqual(1, len(self.cluster.removed_hosts)) - self.assertEqual(self.cluster.removed_hosts[0].address, "192.168.1.2") + self.assertEqual(1, len(self.cluster.metadata.removed_hosts)) + self.assertEqual(self.cluster.metadata.removed_hosts[0].address, "192.168.1.2") def test_refresh_nodes_and_tokens_timeout(self): @@ -423,7 +451,7 @@ def test_handle_status_change(self): } self.cluster.scheduler.reset_mock() self.control_connection._handle_status_change(event) - host = self.cluster.metadata.hosts[DefaultEndPoint('192.168.1.0')] + host = self.cluster.metadata.get_host(DefaultEndPoint('192.168.1.0')) self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.cluster.on_up, host) self.cluster.scheduler.schedule.reset_mock() @@ -440,7 +468,7 @@ def test_handle_status_change(self): 'address': ('192.168.1.0', 9000) } self.control_connection._handle_status_change(event) - host = self.cluster.metadata.hosts[DefaultEndPoint('192.168.1.0')] + host = self.cluster.metadata.get_host(DefaultEndPoint('192.168.1.0')) self.assertIs(host, self.cluster.down_host) def test_handle_schema_change(self): @@ -516,7 +544,7 @@ def test_refresh_nodes_and_tokens_add_host_detects_port(self): del self.connection.peer_results[:] self.connection.peer_results.extend(self.connection.peer_results_v2) self.connection.peer_results[1].append( - ["192.168.1.3", 555, "10.0.0.3", 666, "a", "dc1", "rack1", ["3", "103", "203"], "uuid3"] + ["192.168.1.3", 555, "10.0.0.3", 666, "a", "dc1", "rack1", ["3", "103", "203"], "uuid4"] ) self.connection.wait_for_responses = Mock(return_value=_node_meta_results( self.connection.local_results, self.connection.peer_results)) @@ -536,7 +564,7 @@ def test_refresh_nodes_and_tokens_add_host_detects_invalid_port(self): del self.connection.peer_results[:] self.connection.peer_results.extend(self.connection.peer_results_v2) self.connection.peer_results[1].append( - ["192.168.1.3", -1, "10.0.0.3", 0, "a", "dc1", "rack1", ["3", "103", "203"], "uuid3"] + ["192.168.1.3", -1, "10.0.0.3", 0, "a", "dc1", "rack1", ["3", "103", "203"], "uuid4"] ) self.connection.wait_for_responses = Mock(return_value=_node_meta_results( self.connection.local_results, self.connection.peer_results)) From 3c55ec8a4861562614355a1a789de2e23aad2e38 Mon 
Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Mon, 15 Aug 2022 13:50:42 +0300
Subject: [PATCH 035/551] ci: support running integration tests with libev

For some of the cloud tests it's important that we can run with other
connection classes.
---
 ci/run_integration_test.sh | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/ci/run_integration_test.sh b/ci/run_integration_test.sh
index f5a36a76df..7c1396a665 100755
--- a/ci/run_integration_test.sh
+++ b/ci/run_integration_test.sh
@@ -1,5 +1,7 @@
 #! /bin/bash -e
 
+sudo apt-get install gcc python3-dev libev4 libev-dev
+
 aio_max_nr_recommended_value=1048576
 aio_max_nr=$(cat /proc/sys/fs/aio-max-nr)
 echo "The current aio-max-nr value is $aio_max_nr"
@@ -13,7 +15,7 @@ if (( aio_max_nr != aio_max_nr_recommended_value )); then
     fi
 fi
 
-BRANCH='branch-4.5'
+BRANCH='branch-5.0'
 
 python3 -m venv .test-venv
 source .test-venv/bin/activate
@@ -30,7 +32,7 @@ pip install awscli
 pip install https://github.com/scylladb/scylla-ccm/archive/master.zip
 
 # download version
-LATEST_MASTER_JOB_ID=`aws --no-sign-request s3 ls downloads.scylladb.com/unstable/scylla/${BRANCH}/relocatable/ | grep '2021-' | tr -s ' ' | cut -d ' ' -f 3 | tr -d '\/' | sort -g | tail -n 1`
+LATEST_MASTER_JOB_ID=`aws --no-sign-request s3 ls downloads.scylladb.com/unstable/scylla/${BRANCH}/relocatable/ | tr -s ' ' | cut -d ' ' -f 3 | tr -d '\/' | sort -g | tail -n 1`
 AWS_BASE=s3://downloads.scylladb.com/unstable/scylla/${BRANCH}/relocatable/${LATEST_MASTER_JOB_ID}
 
 aws s3 --no-sign-request cp ${AWS_BASE}/scylla-package.tar.gz . &

From 4cb2ac32714a72df3620ace24dd7ea46697207e4 Mon Sep 17 00:00:00 2001
From: David Garcia
Date: Fri, 19 Aug 2022 12:45:17 +0100
Subject: [PATCH 036/551] docs: update theme 1.3
---
 .github/workflows/docs-pages.yaml | 2 ++
 docs/Makefile                     | 9 ++++++++-
 docs/_utils/redirects.yaml        | 0
 docs/conf.py                      | 5 -----
 docs/pyproject.toml               | 3 ++-
 5 files changed, 12 insertions(+), 7 deletions(-)
 create mode 100644 docs/_utils/redirects.yaml

diff --git a/.github/workflows/docs-pages.yaml b/.github/workflows/docs-pages.yaml
index 5965790c6f..7f45132c9c 100644
--- a/.github/workflows/docs-pages.yaml
+++ b/.github/workflows/docs-pages.yaml
@@ -29,6 +29,8 @@ jobs:
         run: python setup.py develop
       - name: Build docs
         run: make -C docs multiversion
+      - name: Build redirects
+        run: make -C docs redirects
       - name: Deploy docs to GitHub Pages
         run: ./docs/_utils/deploy.sh
         env:
diff --git a/docs/Makefile b/docs/Makefile
index c6b8b5c53a..e31db5dd4b 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -1,7 +1,7 @@
 # Global variables
 # You can set these variables from the command line.
 POETRY = poetry
-SPHINXOPTS =
+SPHINXOPTS = 
 SPHINXBUILD = $(POETRY) run sphinx-build
 PAPER =
 BUILDDIR = _build
@@ -72,6 +72,12 @@ multiversion: setup
 	@echo
 	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
 
+.PHONY: redirects
+redirects: setup
+	$(POETRY) run redirects-cli fromfile --yaml-file _utils/redirects.yaml --output-dir $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+ # Preview commands .PHONY: preview preview: setup @@ -91,3 +97,4 @@ test: setup .PHONY: linkcheck linkcheck: setup $(SPHINXBUILD) -b linkcheck $(SOURCEDIR) $(BUILDDIR)/linkcheck + diff --git a/docs/_utils/redirects.yaml b/docs/_utils/redirects.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/conf.py b/docs/conf.py index 1e73959afc..76c2a576a8 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -77,11 +77,6 @@ # Prefix added to all the URLs generated in the 404 page. notfound_urls_prefix = '' -# -- Options for redirect extension -------------------------------------------- - -# Read a YAML dictionary of redirections and generate an HTML file for each -redirects_file = '_utils/redirections.yaml' - # -- Options for multiversion -------------------------------------------------- # Whitelist pattern for tags diff --git a/docs/pyproject.toml b/docs/pyproject.toml index 82bd20386e..6a67dfa605 100644 --- a/docs/pyproject.toml +++ b/docs/pyproject.toml @@ -14,9 +14,10 @@ python = "^3.7" pyyaml = "6.0" pygments = "2.2.0" recommonmark = "0.7.1" +redirects_cli ="^0.1.2" sphinx-autobuild = "2021.3.14" sphinx-sitemap = "2.1.0" -sphinx-scylladb-theme = "~1.2.1" +sphinx-scylladb-theme = "~1.3.1" sphinx-multiversion-scylla = "~0.2.11" Sphinx = "4.3.2" scales = "1.0.9" From 02a79c7c103854fc8651c2fb68c7c629a999ec97 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Fri, 19 Aug 2022 13:21:14 +0100 Subject: [PATCH 037/551] Update pyproject.toml --- docs/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/pyproject.toml b/docs/pyproject.toml index 6a67dfa605..e9ffdd15d7 100644 --- a/docs/pyproject.toml +++ b/docs/pyproject.toml @@ -14,7 +14,7 @@ python = "^3.7" pyyaml = "6.0" pygments = "2.2.0" recommonmark = "0.7.1" -redirects_cli ="^0.1.2" +redirects_cli ="~0.1.2" sphinx-autobuild = "2021.3.14" sphinx-sitemap = "2.1.0" sphinx-scylladb-theme = "~1.3.1" From 17b2dca6c444ea14745261ff15014757e257ba3c Mon Sep 17 00:00:00 2001 From: David Garcia Date: Fri, 19 Aug 2022 13:21:30 +0100 Subject: [PATCH 038/551] Update Makefile --- docs/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/Makefile b/docs/Makefile index e31db5dd4b..de0bf4afd2 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -1,7 +1,7 @@ # Global variables # You can set these variables from the command line. 
 POETRY = poetry
-SPHINXOPTS = 
+SPHINXOPTS =
 SPHINXBUILD = $(POETRY) run sphinx-build
 PAPER =
 BUILDDIR = _build

From e4e34846d548cd2dc8e070d5c3ec43f28b980c03 Mon Sep 17 00:00:00 2001
From: Bret McGuire
Date: Sat, 3 Sep 2022 04:32:59 -0500
Subject: [PATCH 039/551] Ninja fix for quorum docstring
---
 cassandra/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cassandra/__init__.py b/cassandra/__init__.py
index 5739d5d98e..e6cb5c55bb 100644
--- a/cassandra/__init__.py
+++ b/cassandra/__init__.py
@@ -55,7 +55,7 @@ class ConsistencyLevel(object):
 
     QUORUM = 4
     """
-    ``ceil(RF/2)`` replicas must respond to consider the operation a success
+    ``ceil(RF/2) + 1`` replicas must respond to consider the operation a success
     """
 
     ALL = 5

From 26832c85055b04e399bd3c0bd8ac46bb60c9bf82 Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Wed, 31 Aug 2022 11:08:17 +0300
Subject: [PATCH 040/551] tests: fix test_bad_contact_point

Since the recent changes to metadata, on the first round we remove all
hosts that were created with unknown host_ids, so this test was failing:
it expects all the correct hosts to be available in the metadata, but
they were missing. It seems we didn't update the peers with their newly
found host_ids, hence they were removed (only a later pass of
`refresh_node_list_and_token_map()` would add them back to the hosts
list). Now that we update them the same way we update the local
(control connection) host, the test works.
---
 cassandra/cluster.py                        | 2 ++
 tests/integration/standard/test_metadata.py | 8 +++++++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/cassandra/cluster.py b/cassandra/cluster.py
index 587181ed15..8b40daa437 100644
--- a/cassandra/cluster.py
+++ b/cassandra/cluster.py
@@ -3921,6 +3921,8 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None,
                 tokens = row.get("tokens", None)
                 if partitioner and tokens and self._token_meta_enabled:
                     token_map[host] = tokens
+                self._cluster.metadata.update_host(host, old_endpoint=endpoint)
+
             for old_host_id, old_host in self._cluster.metadata.all_hosts_items():
                 if old_host_id not in found_host_ids:
                     should_rebuild_token_map = True
diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py
index 7a6cef6398..c1e26bc5d9 100644
--- a/tests/integration/standard/test_metadata.py
+++ b/tests/integration/standard/test_metadata.py
@@ -41,6 +41,7 @@
     greaterthanorequaldse67, lessthancass40, TestCluster, DSE_VERSION)
+from tests.util import wait_until
 
 log = logging.getLogger(__name__)
@@ -124,7 +125,12 @@ def test_bad_contact_point(self):
 
         @test_category metadata
         """
-        self.assertEqual(len(self.cluster.metadata.all_hosts()), 3)
+        # wait until we have only 3 hosts
+        wait_until(condition=lambda: len(self.cluster.metadata.all_hosts()) == 3, delay=0.5, max_attempts=5)
+
+        # verify the un-existing host was filtered
+        for host in self.cluster.metadata.all_hosts():
+            self.assertNotEquals(host.endpoint.address, '126.0.0.186')
 
 class SchemaMetadataTests(BasicSegregatedKeyspaceUnitTestCase):

From e44785df89c4d63fc12898e645b9f1734f34ae33 Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Wed, 31 Aug 2022 17:33:54 +0300
Subject: [PATCH 041/551] _refresh_node_list_and_token_map: bring back
 multiple endpoint check

`test_address_translator_basic` started failing: it maps multiple nodes
onto the same address, and the recent change to save host metadata keyed
by host_id broke the assumption that hosts with an identical endpoint
would be filtered out. Not sure how much of a real-life case this is,
but keeping the behavior nevertheless.
---
 cassandra/cluster.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/cassandra/cluster.py b/cassandra/cluster.py
index 8b40daa437..80a1ef9b4c 100644
--- a/cassandra/cluster.py
+++ b/cassandra/cluster.py
@@ -3818,6 +3818,8 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None,
         token_map = {}
 
         found_host_ids = set()
+        found_endpoints = set()
+
         if local_result.parsed_rows:
             local_rows = dict_factory(local_result.column_names, local_result.parsed_rows)
             local_row = local_rows[0]
@@ -3836,7 +3838,10 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None,
             if new_endpoint.address:
                 host.endpoint = new_endpoint
             host.host_id = local_row.get("host_id")
+
             found_host_ids.add(host.host_id)
+            found_endpoints.add(host.endpoint)
+
             host.listen_address = local_row.get("listen_address")
             host.listen_port = local_row.get("listen_port")
             host.broadcast_address = _NodeInfo.get_broadcast_address(local_row)
@@ -3891,12 +3896,16 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None,
             endpoint = self._cluster.endpoint_factory.create(row)
             host_id = row.get("host_id")
 
+            if endpoint in found_endpoints:
+                log.warning("Found multiple hosts with the same endpoint(%s). Excluding peer %s - %s", endpoint, row.get("peer"), host_id)
+                continue
+
             if host_id in found_host_ids:
                 log.warning("Found multiple hosts with the same host_id (%s). Excluding peer %s", host_id, row.get("peer"))
                 continue
 
             found_host_ids.add(host_id)
-
+            found_endpoints.add(endpoint)
             host = self._cluster.metadata.get_host(endpoint)
             datacenter = row.get("data_center")
             rack = row.get("rack")

From e4c508405215496fabadd91e175f2f9f713c785a Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Mon, 19 Sep 2022 13:00:04 +0300
Subject: [PATCH 042/551] Release 3.25.6
---
 cassandra/__init__.py | 2 +-
 docs/conf.py          | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/cassandra/__init__.py b/cassandra/__init__.py
index 4966da3aaf..ed8ce5acfb 100644
--- a/cassandra/__init__.py
+++ b/cassandra/__init__.py
@@ -22,7 +22,7 @@ def emit(self, record):
 logging.getLogger('cassandra').addHandler(NullHandler())
 
-__version_info__ = (3, 25, 4)
+__version_info__ = (3, 25, 6)
 __version__ = '.'.join(map(str, __version_info__))
 
diff --git a/docs/conf.py b/docs/conf.py
index 76c2a576a8..4583f4b62f 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -11,10 +11,10 @@
 # -- General configuration -----------------------------------------------------
 
 # Build documentation for the following tags and branches
-TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla']
+TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.6-scylla']
 BRANCHES = ['master']
 # Set the latest version.
-LATEST_VERSION = '3.25.4-scylla'
+LATEST_VERSION = '3.25.6-scylla'
 # Set which versions are not released yet.
 UNSTABLE_VERSIONS = ['master']
 # Set which versions are deprecated

From cdeb396bc1042a6942484f0a6ffd305e635660af Mon Sep 17 00:00:00 2001
From: Piotr Grabowski
Date: Tue, 4 Oct 2022 13:40:58 +0200
Subject: [PATCH 043/551] pool: inline signal_connection_failure code

In return_connection(), inline the implementation of the
signal_connection_failure method. After this change the code works
exactly the same as before. This change is required for a followup
commit.
---
 cassandra/pool.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/cassandra/pool.py b/cassandra/pool.py
index f90802ea36..20edfad313 100644
--- a/cassandra/pool.py
+++ b/cassandra/pool.py
@@ -542,8 +542,9 @@ def return_connection(self, connection, stream_was_orphaned=False):
             if not connection.signaled_error:
                 log.debug("Defunct or closed connection (%s) returned to pool, potentially "
                           "marking host %s as down", id(connection), self.host)
-                is_down = self._session.cluster.signal_connection_failure(
-                    self.host, connection.last_error, is_host_addition=False)
+                is_down = self.host.signal_connection_failure(connection.last_error)
+                if is_down:
+                    self._session.cluster.on_down(self.host, False, False)
                 connection.signaled_error = True
 
             if self.shutdown_on_error and not is_down:

From d0751e69d005c9bc2a8437772efdd6c2fd335fca Mon Sep 17 00:00:00 2001
From: Piotr Grabowski
Date: Tue, 4 Oct 2022 13:41:57 +0200
Subject: [PATCH 044/551] pool: call cluster.on_down() after pool shutdown()

This commit modifies the logic in return_connection() that is executed
when a connection is being closed or is defunct.

The on_down() call is a call to a method decorated with
@run_in_executor, meaning it can be executed in a separate thread. The
shutdown() call is a synchronous method. This can cause a race:

- If shutdown() is executed before on_down(), then on_down() will see
  there are no valid connections and will reconnect. This is good.
- But if on_down() is faster and is executed before shutdown(), the
  on_down() method will see that there is still a valid connection in
  the pool (because the pool wasn't shut down) and will NOT reconnect.
  Afterwards, the shutdown() completes and there are no valid
  connections in the pool. After that, you will not be able to send
  queries to that host, as every attempt will fail with a
  "ConnectionException('Pool is shutdown')" exception.

The problem is fixed in this commit by moving the on_down() calls after
the call to shutdown(). Because shutdown() is a synchronous method,
this means that first shutdown() is executed and then on_down() is
executed. This is the first case described in the previous paragraph,
and it is the correct order.

You can also think about this change as a reduction from two possible
orderings (shutdown(), on_down() or on_down(), shutdown()) to a single
possible ordering (shutdown(), on_down()).
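Schematically (a simplified sketch of the fixed flow, not the exact
driver code):

    # two orderings were possible before this change:
    #   shutdown(); on_down()  -> on_down() sees an empty pool, reconnects (ok)
    #   on_down(); shutdown()  -> on_down() still sees a live connection,
    #                             never reconnects (bug)
    #
    # after this change only the safe ordering remains:
    if is_down:
        self.shutdown()  # synchronous, completes first
        self._session.cluster.on_down(self.host, is_host_addition=False)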
Fixes #170
---
 cassandra/pool.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/cassandra/pool.py b/cassandra/pool.py
index 20edfad313..9fa1616735 100644
--- a/cassandra/pool.py
+++ b/cassandra/pool.py
@@ -543,16 +543,14 @@ def return_connection(self, connection, stream_was_orphaned=False):
                 log.debug("Defunct or closed connection (%s) returned to pool, potentially "
                           "marking host %s as down", id(connection), self.host)
                 is_down = self.host.signal_connection_failure(connection.last_error)
-                if is_down:
-                    self._session.cluster.on_down(self.host, False, False)
                 connection.signaled_error = True
 
             if self.shutdown_on_error and not is_down:
                 is_down = True
-                self._session.cluster.on_down(self.host, is_host_addition=False)
 
             if is_down:
                 self.shutdown()
+                self._session.cluster.on_down(self.host, is_host_addition=False)
             else:
                 connection.close()
                 with self._lock:

From f5d2d40423e08459d861f52cbe14f143efe58405 Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Thu, 3 Nov 2022 09:11:01 +0200
Subject: [PATCH 045/551] build: support for python 3.11 wheel

update to `cibuildwheel==2.11.2` to start producing python 3.11 wheels
---
 .github/workflows/build-experimental.yml | 2 +-
 .github/workflows/build-push.yml         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/build-experimental.yml b/.github/workflows/build-experimental.yml
index 63c30c5bf0..a278c4cf72 100644
--- a/.github/workflows/build-experimental.yml
+++ b/.github/workflows/build-experimental.yml
@@ -32,7 +32,7 @@ jobs:
 
       - name: Install cibuildwheel
         run: |
-          python -m pip install cibuildwheel==2.3.0
+          python -m pip install cibuildwheel==2.11.2
 
       - name: Build wheels
         run: |
diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml
index 320df2e779..08255cea7d 100644
--- a/.github/workflows/build-push.yml
+++ b/.github/workflows/build-push.yml
@@ -53,7 +53,7 @@ jobs:
 
       - name: Install cibuildwheel
         run: |
-          python -m pip install cibuildwheel==2.3.0
+          python -m pip install cibuildwheel==2.11.2
 
       - name: Install OpenSSL for Windows
         if: runner.os == 'Windows'

From ae656921686a508b4e9ff6d295b60b43ab8eb615 Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Thu, 3 Nov 2022 13:39:39 +0200
Subject: [PATCH 046/551] fix(unittests): fix `test_return_closed/defunct_*`

The recent fixes that avoid a race when stopping/failing a connection
(d0751e69d005c9bc2a8437772efdd6c2fd335fca and
cdeb396bc1042a6942484f0a6ffd305e635660af) started failing a few unit
tests that weren't mocking the correct function to work properly.
---
 tests/unit/test_host_connection_pool.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/tests/unit/test_host_connection_pool.py b/tests/unit/test_host_connection_pool.py
index f9e59648ba..1bb6e8816d 100644
--- a/tests/unit/test_host_connection_pool.py
+++ b/tests/unit/test_host_connection_pool.py
@@ -144,6 +144,7 @@ def test_return_defunct_connection(self):
         pool.borrow_connection(timeout=0.01)
         conn.is_defunct = True
         session.cluster.signal_connection_failure.return_value = False
+        host.signal_connection_failure.return_value = False
         pool.return_connection(conn)
 
         # the connection should be closed a new creation scheduled
@@ -165,16 +166,18 @@ def test_return_defunct_connection_on_down_host(self):
         pool.borrow_connection(timeout=0.01)
         conn.is_defunct = True
         session.cluster.signal_connection_failure.return_value = True
+        host.signal_connection_failure.return_value = True
         pool.return_connection(conn)
 
         # the connection should be closed a new creation scheduled
-        self.assertTrue(session.cluster.signal_connection_failure.call_args)
         self.assertTrue(conn.close.call_args)
         if self.PoolImpl is HostConnection:
             # on shard aware implementation we use submit function regardless
+            self.assertTrue(host.signal_connection_failure.call_args)
             self.assertTrue(session.submit.called)
         else:
             self.assertFalse(session.submit.called)
+            self.assertTrue(session.cluster.signal_connection_failure.call_args)
         self.assertTrue(pool.is_shutdown)
 
     def test_return_closed_connection(self):
@@ -190,6 +193,7 @@ def test_return_closed_connection(self):
         pool.borrow_connection(timeout=0.01)
         conn.is_closed = True
         session.cluster.signal_connection_failure.return_value = False
+        host.signal_connection_failure.return_value = False
         pool.return_connection(conn)
 
         # a new creation should be scheduled

From f7325c28be54f87bb9510a0faeb11e90aa243a7b Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Thu, 3 Nov 2022 16:41:41 +0200
Subject: [PATCH 047/551] fix(test_asyncioreactor.py): handle AttributeError
 from asynctest

The asyncio connection backend is mostly broken and needs refactoring;
for now we'll just skip the test on errors from importing asynctest,
which is broken for python 3.11.
---
 tests/unit/io/test_asyncioreactor.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/unit/io/test_asyncioreactor.py b/tests/unit/io/test_asyncioreactor.py
index aa00a32943..503e3ca34a 100644
--- a/tests/unit/io/test_asyncioreactor.py
+++ b/tests/unit/io/test_asyncioreactor.py
@@ -3,7 +3,7 @@
     from cassandra.io.asyncioreactor import AsyncioConnection
     import asynctest
     ASYNCIO_AVAILABLE = True
-except (ImportError, SyntaxError):
+except (ImportError, SyntaxError, AttributeError):
     AsyncioConnection = None
     ASYNCIO_AVAILABLE = False

From 548d6953c2454922188c29e82f27d8e9f3170f74 Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Sun, 6 Nov 2022 13:05:32 +0200
Subject: [PATCH 048/551] test_immutable_predicate: exception string changed
 in python3.11

The immutable-property exception string changed in python3.11 from
"can't set attribute" to "object has no setter"; the test now supports
both.
--- tests/unit/test_policies.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index a31b4f4c1b..d3ba99fc82 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -1295,7 +1295,7 @@ def test_init_kwargs(self): )) def test_immutable_predicate(self): - expected_message_regex = "can't set attribute" + expected_message_regex = "can't set attribute|object has no setter" hfp = HostFilterPolicy(child_policy=Mock(name='child_policy'), predicate=Mock(name='predicate')) with self.assertRaisesRegexp(AttributeError, expected_message_regex): From 91eaf4af5a1d85b6df48c20e16a87c235aec5901 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Sun, 6 Nov 2022 13:10:56 +0200 Subject: [PATCH 049/551] github actions: enable integration test and build/unittest by default --- .github/workflows/build-push.yml | 4 ++-- .github/workflows/integration-tests.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index 08255cea7d..1fb39db616 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -15,7 +15,7 @@ env: jobs: build_wheels: name: Build wheels ${{ matrix.os }} (${{ matrix.platform }}) - if: contains(github.event.pull_request.labels.*.name, 'test-build') || github.event_name == 'push' && endsWith(github.event.ref, 'scylla') + if: "(!contains(github.event.pull_request.labels.*.name, 'disable-test-build')) || github.event_name == 'push' && endsWith(github.event.ref, 'scylla')" runs-on: ${{ matrix.os }} strategy: fail-fast: false @@ -121,7 +121,7 @@ jobs: build_sdist: name: Build source distribution - if: contains(github.event.pull_request.labels.*.name, 'test-build') || github.event_name == 'push' && endsWith(github.event.ref, 'scylla') + if: "(!contains(github.event.pull_request.labels.*.name, 'disable-test-build'))|| github.event_name == 'push' && endsWith(github.event.ref, 'scylla')" runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index cc3b1edef2..1939cc43d3 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -9,7 +9,7 @@ on: jobs: tests: runs-on: ubuntu-20.04 - if: contains(github.event.pull_request.labels.*.name, 'integration-tests') + if: "!contains(github.event.pull_request.labels.*.name, 'disable-integration-tests')" steps: - uses: actions/checkout@v2 - name: Set up Python 3.8 From ac037670bd04e18f110b042133010e8124a13e7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Fri, 4 Nov 2022 13:43:11 +0100 Subject: [PATCH 050/551] Fix code incompatibilities with Python 2 There are some functionalities used that don't work with Python 2. - Format strings - list.clear() - Type hints - super() without arguments - Packages without __init__.py file - Some import names (futures.thread -> concurrent.futures) Import behaviour changed between Py2 and 3 and one file was missing `from __future__ import absolute_import` line (that makes the behaviour consistent between 2 and 3) which caused import error. Some members in "c_sharding_info.pyx" had "str" type. This type maps to "str" both in Py2 and 3 - and those are different types - raw bytes in Py2, unicode string in py3. This caused errors in Py2, because code was trying to assign unicode strings to those members. 
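Roughly, the failure under Python 2 looks like this (an illustrative
Cython sketch only; the real declarations live in c_shard_info.pyx):

    cdef class ShardingInfo:
        cdef readonly str partitioner  # Py2: `str` here is the bytes type
        def __init__(self, partitioner):
            # assigning a unicode value raises under Py2:
            # TypeError: Expected str, got unicode
            self.partitioner = partitioner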
The fix is to use Cython's / Pyrex's "unicode" type - it maps to "unicode" in Py2 and to "str" in Py3. This commit fixes all of those problems. --- cassandra/c_shard_info.pyx | 4 ++-- cassandra/connection.py | 2 +- cassandra/pool.py | 6 +++++- cassandra/scylla/__init__.py | 0 cassandra/scylla/cloud.py | 14 ++++++++------ tests/unit/test_host_connection_pool.py | 2 +- tests/unit/test_shard_aware.py | 6 +++--- 7 files changed, 20 insertions(+), 14 deletions(-) create mode 100644 cassandra/scylla/__init__.py diff --git a/cassandra/c_shard_info.pyx b/cassandra/c_shard_info.pyx index a1aa42911a..39c098ee82 100644 --- a/cassandra/c_shard_info.pyx +++ b/cassandra/c_shard_info.pyx @@ -19,8 +19,8 @@ cdef extern from *: cdef class ShardingInfo(): cdef readonly int shards_count - cdef readonly str partitioner - cdef readonly str sharding_algorithm + cdef readonly unicode partitioner + cdef readonly unicode sharding_algorithm cdef readonly int sharding_ignore_msb cdef readonly int shard_aware_port cdef readonly int shard_aware_port_ssl diff --git a/cassandra/connection.py b/cassandra/connection.py index 78d7743881..c3ba42d725 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -903,7 +903,7 @@ def _initiate_connection(self, sockaddr): break except Exception as ex: log.debug("port=%d couldn't bind cause: %s", port, str(ex)) - log.debug(f'connection (%r) port=%d should be shard_id=%d', id(self), port, port % self.total_shards) + log.debug('connection (%r) port=%d should be shard_id=%d', id(self), port, port % self.total_shards) self._socket.connect(sockaddr) diff --git a/cassandra/pool.py b/cassandra/pool.py index 9fa1616735..2f3fea93ed 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -15,6 +15,8 @@ """ Connection pooling and host management. 
""" +from __future__ import absolute_import + from concurrent.futures import Future from functools import total_ordering import logging @@ -1200,7 +1202,9 @@ def shutdown(self): with self._lock: connections_to_close.extend(self._connections) self.open_count -= len(self._connections) - self._connections.clear() + # After dropping support for Python 2 we can again use list.clear() + # self._connections.clear() + del self._connections[:] connections_to_close.extend(self._trash) self._trash.clear() diff --git a/cassandra/scylla/__init__.py b/cassandra/scylla/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cassandra/scylla/cloud.py b/cassandra/scylla/cloud.py index 5a4fe782ea..d9ad264155 100644 --- a/cassandra/scylla/cloud.py +++ b/cassandra/scylla/cloud.py @@ -52,12 +52,14 @@ def nth(iterable, n, default=None): class CloudConfiguration: - endpoint_factory: SniEndPointFactory - contact_points: list - auth_provider: AuthProvider = None - ssl_options: dict - ssl_context: SSLContext - skip_tls_verify: bool + # Commented out because this syntax doesn't work with Python2 + # Can be restores after dropping support for Python2 + # endpoint_factory: SniEndPointFactory + # contact_points: list + # auth_provider: AuthProvider = None + # ssl_options: dict + # ssl_context: SSLContext + # skip_tls_verify: bool def __init__(self, configuration_file, pyopenssl=False, endpoint_factory=None): cloud_config = yaml.safe_load(open(configuration_file)) diff --git a/tests/unit/test_host_connection_pool.py b/tests/unit/test_host_connection_pool.py index 1bb6e8816d..40f770f00c 100644 --- a/tests/unit/test_host_connection_pool.py +++ b/tests/unit/test_host_connection_pool.py @@ -283,7 +283,7 @@ class MockSession(MagicMock): keyspace = "reprospace" def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) + super(MockSession, self).__init__(*args, **kwargs) self.cluster = MagicMock() self.cluster.executor = ThreadPoolExecutor(max_workers=2, initializer=self.executor_init) self.cluster.signal_connection_failure = lambda *args, **kwargs: False diff --git a/tests/unit/test_shard_aware.py b/tests/unit/test_shard_aware.py index c05eb51d5d..dfe66eff8e 100644 --- a/tests/unit/test_shard_aware.py +++ b/tests/unit/test_shard_aware.py @@ -18,8 +18,8 @@ import unittest # noqa import logging -from unittest.mock import MagicMock -from futures.thread import ThreadPoolExecutor +from mock import MagicMock +from concurrent.futures import ThreadPoolExecutor from cassandra.cluster import ShardAwareOptions from cassandra.pool import HostConnection, HostDistance @@ -62,7 +62,7 @@ class MockSession(MagicMock): keyspace = "ks1" def __init__(self, is_ssl=False, *args, **kwargs): - super().__init__(*args, **kwargs) + super(MockSession, self).__init__(*args, **kwargs) self.cluster = MagicMock() if is_ssl: self.cluster.ssl_options = {'some_ssl_options': True} From 5021f5c5b24e9cb3c4cd8e623346697a2b2427f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Fri, 4 Nov 2022 13:47:47 +0100 Subject: [PATCH 051/551] Fix requirements Add missing packages to requirements.txt / test-requirements.txt, so that it works with both Python 3 and Python 2. 
Replace nose with pytest --- .github/workflows/build-push.yml | 2 +- requirements.txt | 2 +- test-requirements.txt | 7 ++++--- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index 1fb39db616..55bf95c3d8 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -7,7 +7,7 @@ env: CIBW_TEST_COMMAND_LINUX: "pytest --import-mode append {project}/tests/unit -k 'not (test_connection_initialization or test_cloud)' && EVENT_LOOP_MANAGER=gevent pytest --import-mode append {project}/tests/unit/io/test_geventreactor.py && EVENT_LOOP_MANAGER=eventlet pytest --import-mode append {project}/tests/unit/io/test_eventletreactor.py " CIBW_TEST_COMMAND_MACOS: "pytest --import-mode append {project}/tests/unit -k 'not (test_multi_timer_validation or test_empty_connections or test_connection_initialization or test_timer_cancellation or test_cloud)' " CIBW_TEST_COMMAND_WINDOWS: "pytest --import-mode append {project}/tests/unit -k \"not (test_deserialize_date_range_year or test_datetype or test_libevreactor or test_connection_initialization or test_cloud)\" " - CIBW_BEFORE_TEST: "pip install -r {project}/test-requirements.txt pytest" + CIBW_BEFORE_TEST: "pip install -r {project}/test-requirements.txt" CIBW_BEFORE_BUILD_LINUX: "rm -rf ~/.pyxbld && yum install -y libffi-devel libev libev-devel openssl openssl-devel" CIBW_ENVIRONMENT: "CASS_DRIVER_BUILD_CONCURRENCY=2 CFLAGS='-g0 -O3'" CIBW_SKIP: cp35* cp36* *musllinux* diff --git a/requirements.txt b/requirements.txt index f784fba1b9..28a897b034 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ geomet>=0.1,<0.3 six >=1.9 -futures <=2.2.0 +futures==3.4.0; python_version < '3.0.0' # Futures is not required for Python 3, but it works up through 2.2.0 (after which it introduced breaking syntax). # This is left here to make sure install -r works with any runtime. When installing via setup.py, futures is omitted # for Python 3, in favor of the standard library implementation. 
diff --git a/test-requirements.txt b/test-requirements.txt index df38354f79..3c1382debe 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,12 +1,12 @@ -r requirements.txt scales -nose +pytest mock>1.1 pytz sure pure-sasl -twisted[tls]; python_version >= '3.5' -twisted[tls]==19.2.1; python_version < '3.5' +twisted[tls]; python_version >= '3.5' or python_version < '3.0' +twisted[tls]==19.2.1; python_version < '3.5' and python_version >= '3.0' gevent>=1.0; platform_machine != 'i686' and platform_machine != 'win32' gevent==20.5.0; platform_machine == 'i686' or platform_machine == 'win32' eventlet @@ -17,3 +17,4 @@ backports.ssl_match_hostname; python_version < '2.7.9' futurist; python_version >= '3.7' asynctest; python_version >= '3.5' ipaddress; python_version < '3.3.0' +pyyaml From 4b34b16e38364f5974c225cd5538755a4dabee88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Fri, 4 Nov 2022 16:33:59 +0100 Subject: [PATCH 052/551] Update README about running unit tests --- README-dev.rst | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/README-dev.rst b/README-dev.rst index f2d044b103..b9de2eebce 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -95,11 +95,13 @@ Running Unit Tests ------------------ Unit tests can be run like so:: - nosetests -w tests/unit/ + python -m pytest --import-mode append tests/unit -k 'not (test_connection_initialization or test_cloud)' + EVENT_LOOP_MANAGER=gevent python -m pytest --import-mode append tests/unit/io/test_geventreactor.py + EVENT_LOOP_MANAGER=eventlet python -m pytest --import-mode append tests/unit/io/test_eventletreactor.py You can run a specific test method like so:: - nosetests -w tests/unit/test_connection.py:ConnectionTest.test_bad_protocol_version + python -m pytest tests/unit/test_connection.py::ConnectionTest::test_bad_protocol_version Running Integration Tests ------------------------- @@ -128,11 +130,11 @@ Seeing Test Logs in Real Time ----------------------------- Sometimes it's useful to output logs for the tests as they run:: - nosetests -w tests/unit/ --nocapture --nologcapture + python -m pytest -s tests/unit/ Use tee to capture logs and see them on your terminal:: - nosetests -w tests/unit/ --nocapture --nologcapture 2>&1 | tee test.log + python -m pytest -s tests/unit/ 2>&1 | tee test.log Testing Multiple Python Versions -------------------------------- From 0d8ea0a342df10dcd678612da64198f67c5f722c Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Sun, 6 Nov 2022 13:56:39 +0200 Subject: [PATCH 053/551] github-actions: add action to test python2 support --- .github/workflows/test-python2.yaml | 59 +++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 .github/workflows/test-python2.yaml diff --git a/.github/workflows/test-python2.yaml b/.github/workflows/test-python2.yaml new file mode 100644 index 0000000000..e3fe9635ac --- /dev/null +++ b/.github/workflows/test-python2.yaml @@ -0,0 +1,59 @@ +name: Build and test python2 + +on: [push, pull_request] + +jobs: + test: + name: Test on python2 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - uses: actions/setup-python@v4 + name: Install Python2.7 + with: + python-version: '2.7' + - name: Run unittests + run: |- + pip install -r ./test-requirements.txt + pytest --import-mode append ./tests/unit -k 'not (test_connection_initialization or test_cloud)' + EVENT_LOOP_MANAGER=gevent pytest --import-mode append ./tests/unit/io/test_geventreactor.py + EVENT_LOOP_MANAGER=eventlet pytest 
--import-mode append ./tests/unit/io/test_eventletreactor.py + + build: + name: Build source/wheel distribution for python2 + if: "(!contains(github.event.pull_request.labels.*.name, 'disable-test-build'))|| github.event_name == 'push' && endsWith(github.event.ref, 'scylla')" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - uses: actions/setup-python@v4 + name: Install Python2.7 + with: + python-version: '2.7' + + - name: Build sdist + run: python setup.py sdist + + - uses: actions/upload-artifact@v2 + with: + path: dist/*.tar.gz + + upload_pypi: + needs: [build, test] + runs-on: ubuntu-latest + # upload to PyPI on every tag starting with 'v' + if: github.event_name == 'push' && endsWith(github.event.ref, 'scylla') + # alternatively, to publish when a GitHub Release is created, use the following rule: + # if: github.event_name == 'release' && github.event.action == 'published' + steps: + - uses: actions/download-artifact@v2 + with: + name: artifact + path: dist + + - uses: pypa/gh-action-pypi-publish@master + with: + user: __token__ + password: ${{ secrets.PYPI_API_TOKEN }} + From 5e5919a92f0e5e4bc3b4f5fc6587cd5fc3df45a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Wed, 9 Nov 2022 12:14:54 +0100 Subject: [PATCH 054/551] Add integration tests for Python2 --- .../workflows/integration-tests-python2.yml | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 .github/workflows/integration-tests-python2.yml diff --git a/.github/workflows/integration-tests-python2.yml b/.github/workflows/integration-tests-python2.yml new file mode 100644 index 0000000000..e42a94a5a6 --- /dev/null +++ b/.github/workflows/integration-tests-python2.yml @@ -0,0 +1,22 @@ +name: Integration tests Python2 + +on: + pull_request: + branches: + - master + +jobs: + tests: + runs-on: ubuntu-20.04 + if: "!contains(github.event.pull_request.labels.*.name, 'disable-integration-tests')" + steps: + - uses: actions/checkout@v2 + - name: Install Python2.7 + uses: actions/setup-python@v4 + with: + python-version: 2.7 + + - name: Test with pytest + run: | + ./ci/run_integration_test.sh tests/integration/standard/test_authentication.py tests/integration/standard/test_cluster.py tests/integration/standard/test_concurrent.py tests/integration/standard/test_connection.py tests/integration/standard/test_control_connection.py tests/integration/standard/test_custom_payload.py tests/integration/standard/test_custom_protocol_handler.py tests/integration/standard/test_cython_protocol_handlers.py tests/integration/standard/test_scylla_cloud.py + # can't run this, cause only 2 cpus on github actions: tests/integration/standard/test_shard_aware.py From 1d35a13baf08c85f363a4f72c20ffc93de76a23c Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Tue, 8 Nov 2022 09:01:22 +0200 Subject: [PATCH 055/551] scylla/cloud.py: support using ip address in `server` Align with scylladb/gocql#106, so: When host information was missing, driver used resolved IP address as TLS.ServerName. Instead it should connect to Server specified in ConnectionConfig and use NodeDomain as SNI. 
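To make the intended behavior concrete, a hedged sketch follows (the hostnames are placeholders, not values from this patch): every contact point dials the proxy address from `server`, while the SNI value sent during the TLS handshake is the cluster's node domain rather than a resolved IP.

```python
# Sketch only: all contact points connect to the proxy's address/port,
# while TLS SNI carries the per-cluster node domain.
from cassandra.connection import SniEndPointFactory

factory = SniEndPointFactory("proxy.example.scylla.com", port=9142,
                             node_domain="cluster-id.scylla.com")
contact_point = factory.create_from_sni("cluster-id.scylla.com")
```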
Depends: https://github.com/scylladb/scylla-ccm/pull/412 Ref: https://github.com/scylladb/gocql/pull/106 --- cassandra/scylla/cloud.py | 5 ++--- .../integration/standard/test_scylla_cloud.py | 20 +++++++------------ 2 files changed, 9 insertions(+), 16 deletions(-) diff --git a/cassandra/scylla/cloud.py b/cassandra/scylla/cloud.py index d9ad264155..cce4a92bb0 100644 --- a/cassandra/scylla/cloud.py +++ b/cassandra/scylla/cloud.py @@ -83,13 +83,12 @@ def __init__(self, configuration_file, pyopenssl=False, endpoint_factory=None): if username and password: self.auth_provider = PlainTextAuthProvider(username, password) - @property def contact_points(self): _contact_points = [] for data_center in self.data_centers.values(): - address, _, _ = self.get_server(data_center) - _contact_points.append(self.endpoint_factory.create_from_sni(address)) + _, _, node_domain = self.get_server(data_center) + _contact_points.append(self.endpoint_factory.create_from_sni(node_domain)) return _contact_points def get_server(self, data_center): diff --git a/tests/integration/standard/test_scylla_cloud.py b/tests/integration/standard/test_scylla_cloud.py index c5fe9ce346..bdf08f5f26 100644 --- a/tests/integration/standard/test_scylla_cloud.py +++ b/tests/integration/standard/test_scylla_cloud.py @@ -5,7 +5,6 @@ from tests.integration import use_cluster from cassandra.cluster import Cluster, TwistedConnection -from cassandra.connection import SniEndPointFactory from cassandra.io.asyncorereactor import AsyncoreConnection from cassandra.io.libevreactor import LibevConnection from cassandra.io.geventreactor import GeventConnection @@ -45,21 +44,17 @@ def start_cluster_with_proxy(self): ccm_cluster.sni_proxy_listen_port = listen_port ccm_cluster._update_config() - config_data_yaml, config_path_yaml = create_cloud_config(ccm_cluster.get_path(), listen_port) - - endpoint_factory = SniEndPointFactory(listen_address, port=int(listen_port), - node_domain="cluster-id.scylla.com") - - return config_data_yaml, config_path_yaml, endpoint_factory + config_data_yaml, config_path_yaml = create_cloud_config(ccm_cluster.get_path(), + port=listen_port, address=listen_address) + return config_data_yaml, config_path_yaml def test_1_node_cluster(self): self.ccm_cluster = use_cluster("sni_proxy", [1], start=False) - config_data_yaml, config_path_yaml, endpoint_factory = self.start_cluster_with_proxy() + config_data_yaml, config_path_yaml = self.start_cluster_with_proxy() for config in [config_path_yaml, config_data_yaml]: for connection_class in supported_connection_classes: - cluster = Cluster(scylla_cloud=config, connection_class=connection_class, - endpoint_factory=endpoint_factory) + cluster = Cluster(scylla_cloud=config, connection_class=connection_class) with cluster.connect() as session: res = session.execute("SELECT * FROM system.local") assert res.all() @@ -69,12 +64,11 @@ def test_1_node_cluster(self): def test_3_node_cluster(self): self.ccm_cluster = use_cluster("sni_proxy", [3], start=False) - config_data_yaml, config_path_yaml, endpoint_factory = self.start_cluster_with_proxy() + config_data_yaml, config_path_yaml = self.start_cluster_with_proxy() for config in [config_path_yaml, config_data_yaml]: for connection_class in supported_connection_classes: - cluster = Cluster(scylla_cloud=config, connection_class=connection_class, - endpoint_factory=endpoint_factory) + cluster = Cluster(scylla_cloud=config, connection_class=connection_class) with cluster.connect() as session: res = session.execute("SELECT * FROM system.local") 
assert res.all() From 44c0bd84e7603123346ac1e3d803b4eb640f344f Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Tue, 8 Nov 2022 09:00:37 +0200 Subject: [PATCH 056/551] scylla/cloud.py: fix case in insecureSkipTlsVerify --- cassandra/scylla/cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/scylla/cloud.py b/cassandra/scylla/cloud.py index cce4a92bb0..08e16ced0d 100644 --- a/cassandra/scylla/cloud.py +++ b/cassandra/scylla/cloud.py @@ -68,7 +68,7 @@ def __init__(self, configuration_file, pyopenssl=False, endpoint_factory=None): self.data_centers = cloud_config['datacenters'] self.auth_info = cloud_config['authInfos'][self.current_context['authInfoName']] self.ssl_options = {} - self.skip_tls_verify = self.auth_info.get('insecureSkipTLSVerify', False) + self.skip_tls_verify = self.auth_info.get('insecureSkipTlsVerify', False) self.ssl_context = self.create_pyopenssl_context() if pyopenssl else self.create_ssl_context() proxy_address, port, node_domain = self.get_server(self.data_centers[self.current_context['datacenterName']]) From 6e603f333de06f8d67a2521d80377f99c3b978af Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Tue, 8 Nov 2022 10:12:54 +0200 Subject: [PATCH 057/551] scylla/cloud.py: change default port In the initial implementation we defaulted the `server` port to 443 if it wasn't specified; it was decided that we should default it to the standard CQL SSL port (9142) instead. --- cassandra/scylla/cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/scylla/cloud.py b/cassandra/scylla/cloud.py index 08e16ced0d..e06cfd18de 100644 --- a/cassandra/scylla/cloud.py +++ b/cassandra/scylla/cloud.py @@ -94,7 +94,7 @@ def contact_points(self): def get_server(self, data_center): address = data_center.get('server') address = address.split(":") - port = nth(address, 1, default=443) + port = nth(address, 1, default=9142) address = nth(address, 0) node_domain = data_center.get('nodeDomain') assert address and port and node_domain, "server or nodeDomain are missing" From 8bbc05a6407540e024e989a9c62b9d83d4f05ada Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Wed, 9 Nov 2022 14:31:46 +0200 Subject: [PATCH 058/551] Release 3.25.7 --- cassandra/__init__.py | 2 +- docs/conf.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index ed8ce5acfb..d5b31f1783 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 25, 6) +__version_info__ = (3, 25, 7) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/conf.py b/docs/conf.py index 4583f4b62f..bbfc0df57e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -11,10 +11,10 @@ # -- General configuration ----------------------------------------------------- # Build documentation for the following tags and branches -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.6-scylla'] +TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.7-scylla'] BRANCHES = ['master'] # Set the latest version. -LATEST_VERSION = '3.25.6-scylla' +LATEST_VERSION = '3.25.7-scylla' # Set which versions are not released yet.
UNSTABLE_VERSIONS = ['master'] # Set which versions are deprecated From bf7ad4f4df0731ab9143fe4dc60b903e34f43245 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Thu, 10 Nov 2022 14:58:12 +0100 Subject: [PATCH 059/551] Fix python2 incompatibilities in scylla/cloud.py --- cassandra/scylla/cloud.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/scylla/cloud.py b/cassandra/scylla/cloud.py index e06cfd18de..b380ab70b6 100644 --- a/cassandra/scylla/cloud.py +++ b/cassandra/scylla/cloud.py @@ -34,7 +34,7 @@ def file_or_memory(path=None, data=None): # so we use temporary file to load the key if data: with tempfile.NamedTemporaryFile(mode="wb") as f: - d = base64.decodebytes(bytes(data, encoding='utf-8')) + d = base64.b64decode(data) f.write(d) if not d.endswith(b"\n"): f.write(b"\n") @@ -102,11 +102,11 @@ def get_server(self, data_center): def create_ssl_context(self): ssl_context = ssl.SSLContext(protocol=ssl.PROTOCOL_SSLv23) - ssl_context.verify_mode = ssl.VerifyMode.CERT_NONE if self.skip_tls_verify else ssl.VerifyMode.CERT_REQUIRED + ssl_context.verify_mode = ssl.CERT_NONE if self.skip_tls_verify else ssl.CERT_REQUIRED for data_center in self.data_centers.values(): with file_or_memory(path=data_center.get('certificateAuthorityPath'), data=data_center.get('certificateAuthorityData')) as cafile: - ssl_context.load_verify_locations(cadata=open(cafile).read()) + ssl_context.load_verify_locations(cadata=six.text_type(open(cafile).read())) with file_or_memory(path=self.auth_info.get('clientCertificatePath'), data=self.auth_info.get('clientCertificateData')) as certfile, \ file_or_memory(path=self.auth_info.get('clientKeyPath'), data=self.auth_info.get('clientKeyData')) as keyfile: From 9848bacd07d9d04207d7e6797a50a2a9ffb70f93 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Sun, 13 Nov 2022 12:46:09 +0200 Subject: [PATCH 060/551] metadata pagination: last page wasn't always handled It seems there was a bug in metadata pagination: it would break as soon as it noticed there was no next page, without yielding the parsed rows of that last page. In most cases the default of 1000 rows per page was enough, but when there were more keyspaces than that limit, some of the keyspaces were missed and it was failing the SCT test.
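In essence (a simplified sketch of the loop, not the driver's exact control flow), the final page has to be emitted before the generator terminates:

```python
# Simplified sketch: without the inner yield, the rows of the last page
# (the one that arrives with no further paging_state) were silently dropped.
def iter_pages(fetch_page):
    result = fetch_page(paging_state=None)
    while True:
        if not result.paging_state:
            if result.parsed_rows:
                yield result.parsed_rows  # the fix: still yield the last page
            break
        yield result.parsed_rows
        result = fetch_page(paging_state=result.paging_state)
```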
Fix: #174 --- cassandra/metadata.py | 2 ++ tests/integration/standard/test_metadata.py | 29 +++++++++++++++++++-- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index ce0ed63bd2..7397365407 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -1982,6 +1982,8 @@ def get_next_pages(): elif not next_success: raise next_result if not next_result.paging_state: + if next_result.parsed_rows: + yield next_result.parsed_rows break yield next_result.parsed_rows diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index c1e26bc5d9..eda1562c4c 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1052,13 +1052,38 @@ class Ext1(Ext0): def test_metadata_pagination(self): self.cluster.refresh_schema_metadata() - for i in range(10): + for i in range(12): self.session.execute("CREATE TABLE %s.%s_%d (a int PRIMARY KEY, b map)" % (self.keyspace_name, self.function_table_name, i)) self.cluster.schema_metadata_page_size = 5 self.cluster.refresh_schema_metadata() - self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].tables), 10) + self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].tables), 12) + + def test_metadata_pagination_keyspaces(self): + """ + test for covering + https://github.com/scylladb/python-driver/issues/174 + """ + + self.cluster.refresh_schema_metadata() + keyspaces = [f"keyspace{idx}" for idx in range(15)] + + for ks in keyspaces: + self.session.execute( + f"CREATE KEYSPACE IF NOT EXISTS {ks} WITH REPLICATION = {{ 'class' : 'SimpleStrategy', 'replication_factor' : 3 }}" + ) + + self.cluster.schema_metadata_page_size = 2000 + self.cluster.refresh_schema_metadata() + before_ks_num = len(self.cluster.metadata.keyspaces) + + self.cluster.schema_metadata_page_size = 10 + self.cluster.refresh_schema_metadata() + + after_ks_num = len(self.cluster.metadata.keyspaces) + + self.assertEqual(before_ks_num, after_ks_num) class TestCodeCoverage(unittest.TestCase): From a67a727072a48c46e226e83d1db75cbaecee4a27 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Tue, 15 Nov 2022 17:26:36 +0200 Subject: [PATCH 061/551] Release 3.25.8 --- cassandra/__init__.py | 2 +- docs/conf.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index d5b31f1783..99854b9917 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 25, 7) +__version_info__ = (3, 25, 8) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/conf.py b/docs/conf.py index bbfc0df57e..c6ab75caf5 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -11,10 +11,10 @@ # -- General configuration ----------------------------------------------------- # Build documentation for the following tags and branches -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.7-scylla'] +TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.8-scylla'] BRANCHES = ['master'] # Set the latest version. -LATEST_VERSION = '3.25.7-scylla' +LATEST_VERSION = '3.25.8-scylla' # Set which versions are not released yet. 
UNSTABLE_VERSIONS = ['master'] # Set which versions are deprecated From a44418874850c74f1fc7b359a3de1a20b26d7385 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Tue, 15 Nov 2022 16:01:35 +0200 Subject: [PATCH 062/551] testing to fix ScyllaCloudConfigTests failing in CI --- ci/run_integration_test.sh | 22 +++++-------------- .../integration/standard/test_scylla_cloud.py | 3 +++ 2 files changed, 8 insertions(+), 17 deletions(-) diff --git a/ci/run_integration_test.sh b/ci/run_integration_test.sh index 7c1396a665..72fa1901b0 100755 --- a/ci/run_integration_test.sh +++ b/ci/run_integration_test.sh @@ -15,7 +15,7 @@ if (( aio_max_nr != aio_max_nr_recommended_value )); then fi fi -BRANCH='branch-5.0' +SCYLLA_RELEASE='release:5.0' python3 -m venv .test-venv source .test-venv/bin/activate @@ -32,27 +32,15 @@ pip install awscli pip install https://github.com/scylladb/scylla-ccm/archive/master.zip # download version -LATEST_MASTER_JOB_ID=`aws --no-sign-request s3 ls downloads.scylladb.com/unstable/scylla/${BRANCH}/relocatable/ | tr -s ' ' | cut -d ' ' -f 3 | tr -d '\/' | sort -g | tail -n 1` -AWS_BASE=s3://downloads.scylladb.com/unstable/scylla/${BRANCH}/relocatable/${LATEST_MASTER_JOB_ID} - -aws s3 --no-sign-request cp ${AWS_BASE}/scylla-package.tar.gz . & -aws s3 --no-sign-request cp ${AWS_BASE}/scylla-tools-package.tar.gz . & -aws s3 --no-sign-request cp ${AWS_BASE}/scylla-jmx-package.tar.gz . & -wait - -ccm create scylla-driver-temp -n 1 --scylla --version unstable/${BRANCH}:$LATEST_MASTER_JOB_ID \ - --scylla-core-package-uri=./scylla-package.tar.gz \ - --scylla-tools-java-package-uri=./scylla-tools-package.tar.gz \ - --scylla-jmx-package-uri=./scylla-jmx-package.tar.gz +ccm create scylla-driver-temp -n 1 --scylla --version ${SCYLLA_RELEASE} ccm remove # run test -echo "export SCYLLA_VERSION=unstable/${BRANCH}:${LATEST_MASTER_JOB_ID}" +echo "export SCYLLA_VERSION=${SCYLLA_RELEASE}" echo "PROTOCOL_VERSION=4 EVENT_LOOP_MANAGER=asyncio pytest --import-mode append tests/integration/standard/" -export SCYLLA_VERSION=unstable/${BRANCH}:${LATEST_MASTER_JOB_ID} +export SCYLLA_VERSION=${SCYLLA_RELEASE} export MAPPED_SCYLLA_VERSION=3.11.4 -PROTOCOL_VERSION=4 EVENT_LOOP_MANAGER=asyncio pytest -rf --import-mode append $* - +PROTOCOL_VERSION=4 EVENT_LOOP_MANAGER=libev pytest -rf --import-mode append $* diff --git a/tests/integration/standard/test_scylla_cloud.py b/tests/integration/standard/test_scylla_cloud.py index bdf08f5f26..2106407ebf 100644 --- a/tests/integration/standard/test_scylla_cloud.py +++ b/tests/integration/standard/test_scylla_cloud.py @@ -1,3 +1,4 @@ +import logging import os.path from unittest import TestCase from ccmlib.utils.ssl_utils import generate_ssl_stores @@ -54,6 +55,7 @@ def test_1_node_cluster(self): for config in [config_path_yaml, config_data_yaml]: for connection_class in supported_connection_classes: + logging.warning('testing with class: %s', connection_class.__name__) cluster = Cluster(scylla_cloud=config, connection_class=connection_class) with cluster.connect() as session: res = session.execute("SELECT * FROM system.local") @@ -68,6 +70,7 @@ def test_3_node_cluster(self): for config in [config_path_yaml, config_data_yaml]: for connection_class in supported_connection_classes: + logging.warning('testing with class: %s', connection_class.__name__) cluster = Cluster(scylla_cloud=config, connection_class=connection_class) with cluster.connect() as session: res = session.execute("SELECT * FROM system.local") From a8f01e0bc5824a64e6ffcbd71682b202d6c57271 Mon Sep 17 
00:00:00 2001 From: Israel Fruchter Date: Wed, 23 Nov 2022 11:03:39 +0200 Subject: [PATCH 063/551] disable the shard-aware port by default when using scylla_cloud Since we are going to work via a load balancer, shard awareness based on port can't work for us. Also, if someone tries to enable it via configuration, we'll fail like this: ``` Traceback (most recent call last): File "../python-driver/cloud_config.py", line 59, in thread1() File "../python-driver/cloud_config.py", line 39, in thread1 cluster = Cluster(scylla_cloud='../config_data.yaml', connect_timeout=60, control_connection_timeout=30, ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "../python-driver/cassandra/cluster.py", line 1167, in __init__ raise ValueError("shard_aware_options.disable_shardaware_port=False " ValueError: shard_aware_options.disable_shardaware_port=False cannot be specified with a scylla cloud configuration ``` Fixes: scylladb/scylla-operator#1104 --- cassandra/cluster.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 80a1ef9b4c..43d0f768a1 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1163,7 +1163,9 @@ def __init__(self, if contact_points is not _NOT_SET or ssl_context or ssl_options: raise ValueError("contact_points, ssl_context, and ssl_options " "cannot be specified with a scylla cloud configuration") - + if shard_aware_options and not shard_aware_options.disable_shardaware_port: + raise ValueError("shard_aware_options.disable_shardaware_port=False " + "cannot be specified with a scylla cloud configuration") uses_twisted = TwistedConnection and issubclass(self.connection_class, TwistedConnection) uses_eventlet = EventletConnection and issubclass(self.connection_class, EventletConnection) @@ -1174,6 +1176,7 @@ def __init__(self, contact_points = scylla_cloud_config.contact_points ssl_options = scylla_cloud_config.ssl_options auth_provider = scylla_cloud_config.auth_provider + shard_aware_options = ShardAwareOptions(shard_aware_options, disable_shardaware_port=True) if cloud is not None: self.cloud = cloud From ce3a7b4ad1a28ee3bf2768551025b60ffcaea48f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wojciech=20Przytu=C5=82a?= Date: Fri, 25 Nov 2022 13:09:19 +0100 Subject: [PATCH 064/551] Fixed yaml model (insecureSkipTlsVerify) Field insecureSkipTlsVerify was incorrectly expected to be in AuthInfo instead of Datacenter.
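Illustrated with a hypothetical config fragment (all values below are placeholders, not taken from a real cloud bundle), the flag is read from the datacenter entry rather than the authInfo entry:

```python
# Hypothetical cloud config shape, for illustration only.
cloud_config = {
    'datacenters': {
        'dc1': {
            'server': 'proxy.example.scylla.com:9142',
            'nodeDomain': 'cluster-id.scylla.com',
            'insecureSkipTlsVerify': True,   # lives on the datacenter...
        },
    },
    'authInfos': {
        'default': {'username': 'user', 'password': 'pass'},  # ...not here
    },
}
current_dc = cloud_config['datacenters']['dc1']
skip_tls_verify = current_dc.get('insecureSkipTlsVerify', False)
```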
--- cassandra/scylla/cloud.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cassandra/scylla/cloud.py b/cassandra/scylla/cloud.py index b380ab70b6..9ba898ba3b 100644 --- a/cassandra/scylla/cloud.py +++ b/cassandra/scylla/cloud.py @@ -66,12 +66,13 @@ def __init__(self, configuration_file, pyopenssl=False, endpoint_factory=None): self.current_context = cloud_config['contexts'][cloud_config['currentContext']] self.data_centers = cloud_config['datacenters'] + self.current_data_center = self.data_centers[self.current_context['datacenterName']] self.auth_info = cloud_config['authInfos'][self.current_context['authInfoName']] self.ssl_options = {} - self.skip_tls_verify = self.auth_info.get('insecureSkipTlsVerify', False) + self.skip_tls_verify = self.current_data_center.get('insecureSkipTlsVerify', False) self.ssl_context = self.create_pyopenssl_context() if pyopenssl else self.create_ssl_context() - proxy_address, port, node_domain = self.get_server(self.data_centers[self.current_context['datacenterName']]) + proxy_address, port, node_domain = self.get_server(self.current_data_center) if not endpoint_factory: endpoint_factory = SniEndPointFactory(proxy_address, port=int(port), node_domain=node_domain) From 230b9384f3434fb5f8314f1ad88d181855f33f9f Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Fri, 25 Nov 2022 07:27:51 +0200 Subject: [PATCH 065/551] Update entrypoint and host_id only when `SniEndPointFactory` used There are use cases where the user connects with an external address (i.e. not the node broadcast address) and without an `AddressTranslator`; the user's assumption is that they can connect and use the control connection without the driver creating any more connections to other nodes. When we changed the logic to work based on host_id, we decided to remove the initial control connection host, since it didn't have a correct host_name, which broke this use case. So for now we'll keep the removal only for the case where we are sure it's the expected thing to happen, i.e. when `SniEndPointFactory` is used.
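Condensed to its core (a sketch mirroring the diff below, with the surrounding control-connection plumbing omitted), the endpoint rewrite from system.local is now gated on the factory type:

```python
# Sketch of the guard: only rewrite the host's endpoint from system.local
# when SNI endpoints are in use; plain public-address setups keep the
# address the user originally connected with.
from cassandra.connection import SniEndPointFactory

def maybe_update_endpoint(cluster, host, local_row):
    if isinstance(cluster.endpoint_factory, SniEndPointFactory):
        new_endpoint = cluster.endpoint_factory.create(local_row)
        if new_endpoint.address:
            host.endpoint = new_endpoint
```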
Fix: #184 --- cassandra/cluster.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 43d0f768a1..99fb995945 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3837,9 +3837,14 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, datacenter = local_row.get("data_center") rack = local_row.get("rack") self._update_location_info(host, datacenter, rack) - new_endpoint = self._cluster.endpoint_factory.create(local_row) - if new_endpoint.address: - host.endpoint = new_endpoint + + # support the use case of connecting only with public address + if isinstance(self._cluster.endpoint_factory, SniEndPointFactory): + new_endpoint = self._cluster.endpoint_factory.create(local_row) + + if new_endpoint.address: + host.endpoint = new_endpoint + host.host_id = local_row.get("host_id") found_host_ids.add(host.host_id) From 9c50105aa38e14f9ba8a5ee7600875ea1ae15112 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Mon, 28 Nov 2022 19:22:46 +0200 Subject: [PATCH 066/551] Release 3.25.9 --- cassandra/__init__.py | 2 +- docs/conf.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 99854b9917..be6dc47016 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 25, 8) +__version_info__ = (3, 25, 9) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/conf.py b/docs/conf.py index c6ab75caf5..9327464148 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -11,10 +11,10 @@ # -- General configuration ----------------------------------------------------- # Build documentation for the following tags and branches -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.8-scylla'] +TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.9-scylla'] BRANCHES = ['master'] # Set the latest version. -LATEST_VERSION = '3.25.8-scylla' +LATEST_VERSION = '3.25.9-scylla' # Set which versions are not released yet. 
UNSTABLE_VERSIONS = ['master'] # Set which versions are deprecated From e113a67f7ba0178619b4b4572401cde8d7f379e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Wed, 30 Nov 2022 15:36:20 +0100 Subject: [PATCH 067/551] Run integration tests on master --- .github/workflows/integration-tests-python2.yml | 3 +++ .github/workflows/integration-tests.yml | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/integration-tests-python2.yml b/.github/workflows/integration-tests-python2.yml index e42a94a5a6..c44b4e4b1f 100644 --- a/.github/workflows/integration-tests-python2.yml +++ b/.github/workflows/integration-tests-python2.yml @@ -4,6 +4,9 @@ on: pull_request: branches: - master + push: + branches: + - master jobs: tests: diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 1939cc43d3..ff0a5685ce 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -4,7 +4,9 @@ on: pull_request: branches: - master - + push: + branches: + - master jobs: tests: From 71001c3f7a3b5de6b1168d3af5eda7012c70e569 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Wed, 30 Nov 2022 20:10:28 +0100 Subject: [PATCH 068/551] Use ubuntu 20.04 for python2 tests --- .github/workflows/test-python2.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test-python2.yaml b/.github/workflows/test-python2.yaml index e3fe9635ac..532f02d084 100644 --- a/.github/workflows/test-python2.yaml +++ b/.github/workflows/test-python2.yaml @@ -5,7 +5,7 @@ on: [push, pull_request] jobs: test: name: Test on python2 - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 @@ -23,7 +23,7 @@ jobs: build: name: Build source/wheel distribution for python2 if: "(!contains(github.event.pull_request.labels.*.name, 'disable-test-build'))|| github.event_name == 'push' && endsWith(github.event.ref, 'scylla')" - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 @@ -41,7 +41,7 @@ jobs: upload_pypi: needs: [build, test] - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 # upload to PyPI on every tag starting with 'v' if: github.event_name == 'push' && endsWith(github.event.ref, 'scylla') # alternatively, to publish when a GitHub Release is created, use the following rule: From 07bd1d8aa772aa1897bddcaf1c8d156d42b9ed79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Wed, 30 Nov 2022 16:36:14 +0100 Subject: [PATCH 069/551] Add a test reproducing 'USE ks' race condition --- .../workflows/integration-tests-python2.yml | 2 +- .github/workflows/integration-tests.yml | 2 +- .../integration/standard/test_use_keyspace.py | 71 +++++++++++++++++++ 3 files changed, 73 insertions(+), 2 deletions(-) create mode 100644 tests/integration/standard/test_use_keyspace.py diff --git a/.github/workflows/integration-tests-python2.yml b/.github/workflows/integration-tests-python2.yml index c44b4e4b1f..ee2b835b3c 100644 --- a/.github/workflows/integration-tests-python2.yml +++ b/.github/workflows/integration-tests-python2.yml @@ -21,5 +21,5 @@ jobs: - name: Test with pytest run: | - ./ci/run_integration_test.sh tests/integration/standard/test_authentication.py tests/integration/standard/test_cluster.py tests/integration/standard/test_concurrent.py tests/integration/standard/test_connection.py tests/integration/standard/test_control_connection.py tests/integration/standard/test_custom_payload.py 
tests/integration/standard/test_custom_protocol_handler.py tests/integration/standard/test_cython_protocol_handlers.py tests/integration/standard/test_scylla_cloud.py + ./ci/run_integration_test.sh tests/integration/standard/test_authentication.py tests/integration/standard/test_cluster.py tests/integration/standard/test_concurrent.py tests/integration/standard/test_connection.py tests/integration/standard/test_control_connection.py tests/integration/standard/test_custom_payload.py tests/integration/standard/test_custom_protocol_handler.py tests/integration/standard/test_cython_protocol_handlers.py tests/integration/standard/test_scylla_cloud.py tests/integration/standard/test_use_keyspace.py # can't run this, cause only 2 cpus on github actions: tests/integration/standard/test_shard_aware.py diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index ff0a5685ce..db8efb3125 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -21,5 +21,5 @@ jobs: - name: Test with pytest run: | - ./ci/run_integration_test.sh tests/integration/standard/test_authentication.py tests/integration/standard/test_cluster.py tests/integration/standard/test_concurrent.py tests/integration/standard/test_connection.py tests/integration/standard/test_control_connection.py tests/integration/standard/test_custom_payload.py tests/integration/standard/test_custom_protocol_handler.py tests/integration/standard/test_cython_protocol_handlers.py tests/integration/standard/test_scylla_cloud.py + ./ci/run_integration_test.sh tests/integration/standard/test_authentication.py tests/integration/standard/test_cluster.py tests/integration/standard/test_concurrent.py tests/integration/standard/test_connection.py tests/integration/standard/test_control_connection.py tests/integration/standard/test_custom_payload.py tests/integration/standard/test_custom_protocol_handler.py tests/integration/standard/test_cython_protocol_handlers.py tests/integration/standard/test_scylla_cloud.py tests/integration/standard/test_use_keyspace.py # can't run this, cause only 2 cpus on github actions: tests/integration/standard/test_shard_aware.py diff --git a/tests/integration/standard/test_use_keyspace.py b/tests/integration/standard/test_use_keyspace.py new file mode 100644 index 0000000000..578d4b2256 --- /dev/null +++ b/tests/integration/standard/test_use_keyspace.py @@ -0,0 +1,71 @@ +import os +import time +import random +from subprocess import run +import logging + +try: + from concurrent.futures import ThreadPoolExecutor, as_completed +except ImportError: + from futures import ThreadPoolExecutor, as_completed # noqa + +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +from mock import patch + +from cassandra.connection import Connection +from cassandra.cluster import Cluster +from cassandra.policies import TokenAwarePolicy, RoundRobinPolicy, ConstantReconnectionPolicy +from cassandra import OperationTimedOut, ConsistencyLevel + +from tests.integration import use_cluster, get_node, PROTOCOL_VERSION + +LOGGER = logging.getLogger(__name__) + +def setup_module(): + os.environ['SCYLLA_EXT_OPTS'] = "--smp 2 --memory 2048M" + use_cluster('shared_aware', [3], start=True) + + + +class TestUseKeyspace(unittest.TestCase): + @classmethod + def setup_class(cls): + cls.cluster = Cluster(contact_points=["127.0.0.1"], protocol_version=PROTOCOL_VERSION, + load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), + 
reconnection_policy=ConstantReconnectionPolicy(1)) + cls.session = cls.cluster.connect() + LOGGER.info(cls.cluster.is_shard_aware()) + LOGGER.info(cls.cluster.shard_aware_stats()) + @classmethod + def teardown_class(cls): + cls.cluster.shutdown() + + def test_set_keyspace_slow_connection(self): + # Test that "USE keyspace" gets propagated + # to all connections. + # + # Reproduces an issue #187 where some pending + # connections for shards would not + # receive "USE keyspace". + # + # Simulate that scenario by adding an artifical + # delay before sending "USE keyspace" on + # connections. + + original_set_keyspace_blocking = Connection.set_keyspace_blocking + def patched_set_keyspace_blocking(*args, **kwargs): + time.sleep(1) + return original_set_keyspace_blocking(*args, **kwargs) + + with patch.object(Connection, "set_keyspace_blocking", patched_set_keyspace_blocking): + self.session.execute("CREATE KEYSPACE test_set_keyspace WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}") + self.session.execute("CREATE TABLE test_set_keyspace.set_keyspace_slow_connection(pk int, PRIMARY KEY(pk))") + + session2 = self.cluster.connect() + session2.execute("USE test_set_keyspace") + for i in range(200): + session2.execute(f"SELECT * FROM set_keyspace_slow_connection WHERE pk = 1") From 0e6e37bc8b9a8cbe18dc0700e22da3907b362ec0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Wed, 30 Nov 2022 16:37:22 +0100 Subject: [PATCH 070/551] Fix 'USE ks' race condition --- cassandra/pool.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/cassandra/pool.py b/cassandra/pool.py index 2f3fea93ed..e310cb39e7 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -739,6 +739,9 @@ def _open_connection_to_missing_shard(self, shard_id): conn.shard_id, self.host ) + if self._keyspace: + conn.set_keyspace_blocking(self._keyspace) + self._connections[conn.shard_id] = conn if old_conn is not None: remaining = old_conn.in_flight - len(old_conn.orphaned_request_ids) @@ -763,13 +766,6 @@ def _open_connection_to_missing_shard(self, shard_id): old_conn.close() else: self._trash.add(old_conn) - if self._keyspace: - with self._lock: - if self.is_shutdown: - conn.close() - old_conn = self._connections.get(conn.shard_id) - if old_conn: - old_conn.set_keyspace_blocking(self._keyspace) num_missing_or_needing_replacement = self.num_missing_or_needing_replacement log.debug( "Connected to %s/%i shards on host %s (%i missing or needs replacement)", From 6959e3092e482ac4609c6865b96abf2c1e1a2628 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Wed, 30 Nov 2022 23:24:48 +0200 Subject: [PATCH 071/551] Release 3.25.10 --- cassandra/__init__.py | 2 +- docs/conf.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index be6dc47016..94de644dd8 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 25, 9) +__version_info__ = (3, 25, 10) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/conf.py b/docs/conf.py index 9327464148..ebe4acc6f6 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -11,10 +11,10 @@ # -- General configuration ----------------------------------------------------- # Build documentation for the following tags and branches -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.9-scylla'] +TAGS = ['3.21.0-scylla', 
'3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.10-scylla'] BRANCHES = ['master'] # Set the latest version. -LATEST_VERSION = '3.25.9-scylla' +LATEST_VERSION = '3.25.10-scylla' # Set which versions are not released yet. UNSTABLE_VERSIONS = ['master'] # Set which versions are deprecated From 64e9d84ffa5bfc6fd76721dfcd4de7be7f4fbfae Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Wed, 30 Nov 2022 23:31:52 +0200 Subject: [PATCH 072/551] docs: update broken links to scylla docs those were changed because the scylla docs were refactored Fix: #188 --- docs/scylla_specific.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/scylla_specific.rst b/docs/scylla_specific.rst index 24e2182dc6..101ddb534b 100644 --- a/docs/scylla_specific.rst +++ b/docs/scylla_specific.rst @@ -8,12 +8,12 @@ Shard Awareness As a result, latency is significantly reduced because there is no need to pass data between the shards. Details on the scylla cql protocol extensions -https://github.com/scylladb/scylla/blob/master/docs/design-notes/protocol-extensions.md#intranode-sharding +https://github.com/scylladb/scylla/blob/master/docs/dev/protocol-extensions.md#intranode-sharding For using it you only need to enable ``TokenAwarePolicy`` on the ``Cluster`` See the configuration of ``native_shard_aware_transport_port`` and ``native_shard_aware_transport_port_ssl`` on scylla.yaml: -https://github.com/scylladb/scylla/blob/master/docs/design-notes/protocols.md#cql-client-protocol +https://github.com/scylladb/scylla/blob/master/docs/dev/protocols.md#cql-client-protocol .. code:: python From 3d16fd3bc0f59762f22268538f5a7480576d97d9 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 1 Dec 2022 00:19:38 +0200 Subject: [PATCH 073/551] docs: install python3-dev --- docs/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/Makefile b/docs/Makefile index de0bf4afd2..93317e21fe 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -25,7 +25,7 @@ all: dirhtml .PHONY: setupenv setupenv: pip install -q poetry - sudo apt-get install gcc python-dev libev4 libev-dev + sudo apt-get install gcc python3-dev libev4 libev-dev .PHONY: setup setup: From 57d8dee4b4d52db8e3315c4732a0191d3dcfef41 Mon Sep 17 00:00:00 2001 From: IlyaOrlov Date: Mon, 12 Dec 2022 23:42:12 +0300 Subject: [PATCH 074/551] Remove extra call of self.next() Fix for the issue https://github.com/scylladb/python-driver/issues/194 --- cassandra/cluster.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 99fb995945..9fc2042d2d 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -5253,7 +5253,6 @@ def next(self): if not self.response_future._continuous_paging_session: self.fetch_next_page() self._page_iter = iter(self._current_rows) - return self.next() # Some servers can return empty pages in this case; Scylla is known to do # so in some circumstances.
Guard against this by recursing to handle From b01372a879eacfc525d79fa299da1288d8342291 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Fri, 16 Dec 2022 15:17:51 -0600 Subject: [PATCH 075/551] Applying fixes to Jenkinsfile to get build working in AWS --- Jenkinsfile | 26 +++----------------------- 1 file changed, 3 insertions(+), 23 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index abb6092758..58f189ebee 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -357,26 +357,6 @@ def getDriverMetricType() { return metric_type } -def submitCIMetrics(buildType) { - long durationMs = currentBuild.duration - long durationSec = durationMs / 1000 - long nowSec = (currentBuild.startTimeInMillis + durationMs) / 1000 - def branchNameNoPeriods = env.BRANCH_NAME.replaceAll('\\.', '_') - metric_type = getDriverMetricType() - def durationMetric = "okr.ci.python.${metric_type}.${buildType}.${branchNameNoPeriods} ${durationSec} ${nowSec}" - - timeout(time: 1, unit: 'MINUTES') { - withCredentials([string(credentialsId: 'lab-grafana-address', variable: 'LAB_GRAFANA_ADDRESS'), - string(credentialsId: 'lab-grafana-port', variable: 'LAB_GRAFANA_PORT')]) { - withEnv(["DURATION_METRIC=${durationMetric}"]) { - sh label: 'Send runtime metrics to labgrafana', script: '''#!/bin/bash -lex - echo "${DURATION_METRIC}" | nc -q 5 ${LAB_GRAFANA_ADDRESS} ${LAB_GRAFANA_PORT} - ''' - } - } - } -} - def describeBuild(buildContext) { script { def runtimes = buildContext.matrix["RUNTIME"] @@ -387,7 +367,9 @@ def describeBuild(buildContext) { } } -def scheduleTriggerJobName = "drivers/python/oss/master/disabled" +def scheduleTriggerJobName() { + "drivers/python/oss/master/disabled" +} pipeline { agent none @@ -663,8 +645,6 @@ pipeline { // build and test all builds parallel getMatrixBuilds(context) - // send the metrics - submitCIMetrics('commit') slack.notifyChannel(currentBuild.currentResult) } } From 5503bab7688edbd378b424eb917d056c690c871c Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Fri, 16 Dec 2022 15:47:10 -0600 Subject: [PATCH 076/551] Fix to prior fix --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 58f189ebee..f39a79a8b4 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -608,7 +608,7 @@ pipeline { } triggers { - parameterizedCron((scheduleTriggerJobName == env.JOB_NAME) ? """ + parameterizedCron((scheduleTriggerJobName() == env.JOB_NAME) ? """ # Every weeknight (Monday - Friday) around 4:00 AM # These schedules will run with and without Cython enabled for Python v2.7.18 and v3.5.9 H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18 3.5.9;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 From 3dbe62012389250e11d0f0e6cf731a5278af35d7 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 21 Dec 2022 14:48:08 -0600 Subject: [PATCH 077/551] Smaller smoke test configuration to avoid explosion of test builds in AWS --- Jenkinsfile | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index f39a79a8b4..283220e57d 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -50,6 +50,15 @@ matrices = [ "SERVER": ['dse-5.0', 'dse-5.1', 'dse-6.0', 'dse-6.7', 'dse-6.8'], "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], "CYTHON": ["True", "False"] + ], + /* + CI-friendly test configuration. Currently-supported Python version + modern C*/DSE instances. + We also avoid cython since it's tested as part of the nightlies. 
+ */ + "SMOKE": [ + "SERVER": ['3.11', '4.0', 'dse-6.8'], + "RUNTIME": ['3.7.7', '3.8.3'], + "CYTHON": ["False"] ] ] @@ -72,7 +81,7 @@ def getBuildContext() { def profile = "${params.PROFILE}" def EVENT_LOOP = "${params.EVENT_LOOP.toLowerCase()}" - matrixType = "FULL" + matrixType = "SMOKE" developBranchPattern = ~"((dev|long)-)?python-.*" if (developBranchPattern.matcher(env.BRANCH_NAME).matches()) { @@ -404,7 +413,7 @@ pipeline { ''') choice( name: 'PROFILE', - choices: ['STANDARD', 'FULL', 'DSE-SMOKE-TEST', 'EVENT_LOOP'], + choices: ['SMOKE', 'STANDARD', 'FULL', 'DSE-SMOKE-TEST', 'EVENT_LOOP'], description: '''

Profile to utilize for scheduled or adhoc builds

@@ -413,6 +422,10 @@ pipeline { + + + + From 51416d9597d991eab22163a06a4b88cdd0939b8a Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 21 Dec 2022 14:52:21 -0600 Subject: [PATCH 078/551] Hey, let's actually update the right things, shall we? --- Jenkinsfile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 283220e57d..e8281a15b5 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -413,7 +413,7 @@ pipeline {
Choice Description
SMOKEBasic smoke tests for current Python runtimes + C*/DSE versions
STANDARD Execute the standard tests for the driver
''') choice( name: 'PROFILE', - choices: ['SMOKE', 'STANDARD', 'FULL', 'DSE-SMOKE-TEST', 'EVENT_LOOP'], + choices: ['STANDARD', 'FULL', 'DSE-SMOKE-TEST', 'EVENT_LOOP'], description: '''

Profile to utilize for scheduled or adhoc builds

@@ -422,10 +422,6 @@ pipeline { - - - - @@ -445,7 +441,7 @@ pipeline {
Choice Description
SMOKEBasic smoke tests for current Python runtimes + C*/DSE versions
STANDARD Execute the standard tests for the driver
''') choice( name: 'MATRIX', - choices: ['DEFAULT', 'FULL', 'DEVELOP', 'CASSANDRA', 'DSE'], + choices: ['DEFAULT', 'SMOKE', 'FULL', 'DEVELOP', 'CASSANDRA', 'DSE'], description: '''

The matrix for the build.

@@ -458,6 +454,10 @@ pipeline { + + + + From 1c9b4bd34da54d2ad2fccdf8479b62704615bd50 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 21 Dec 2022 14:56:05 -0600 Subject: [PATCH 079/551] Groovy fixes --- Jenkinsfile | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index e8281a15b5..9536f52aa7 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -30,6 +30,9 @@ import com.datastax.jenkins.drivers.python.Slack slack = new Slack() // Define our predefined matrices +// +// Smoke tests are CI-friendly test configuration. Currently-supported Python version + modern C*/DSE instances. +// We also avoid cython since it's tested as part of the nightlies. matrices = [ "FULL": [ "SERVER": ['2.1', '2.2', '3.0', '3.11', '4.0', 'dse-5.0', 'dse-5.1', 'dse-6.0', 'dse-6.7', 'dse-6.8'], @@ -51,10 +54,6 @@ matrices = [ "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], "CYTHON": ["True", "False"] ], - /* - CI-friendly test configuration. Currently-supported Python version + modern C*/DSE instances. - We also avoid cython since it's tested as part of the nightlies. - */ "SMOKE": [ "SERVER": ['3.11', '4.0', 'dse-6.8'], "RUNTIME": ['3.7.7', '3.8.3'], From 4e967004566a9053b679e72fbe65ece15e5c92a4 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Tue, 13 Dec 2022 19:08:05 +0200 Subject: [PATCH 080/551] test_use_keyspace.py: remove unneeded code --- tests/integration/standard/test_use_keyspace.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/tests/integration/standard/test_use_keyspace.py b/tests/integration/standard/test_use_keyspace.py index 578d4b2256..42cf03a553 100644 --- a/tests/integration/standard/test_use_keyspace.py +++ b/tests/integration/standard/test_use_keyspace.py @@ -1,14 +1,7 @@ import os import time -import random -from subprocess import run import logging -try: - from concurrent.futures import ThreadPoolExecutor, as_completed -except ImportError: - from futures import ThreadPoolExecutor, as_completed # noqa - try: import unittest2 as unittest except ImportError: @@ -19,18 +12,18 @@ from cassandra.connection import Connection from cassandra.cluster import Cluster from cassandra.policies import TokenAwarePolicy, RoundRobinPolicy, ConstantReconnectionPolicy -from cassandra import OperationTimedOut, ConsistencyLevel -from tests.integration import use_cluster, get_node, PROTOCOL_VERSION +from tests.integration import use_cluster, PROTOCOL_VERSION, local LOGGER = logging.getLogger(__name__) + def setup_module(): os.environ['SCYLLA_EXT_OPTS'] = "--smp 2 --memory 2048M" use_cluster('shared_aware', [3], start=True) - +@local class TestUseKeyspace(unittest.TestCase): @classmethod def setup_class(cls): @@ -40,6 +33,7 @@ def setup_class(cls): cls.session = cls.cluster.connect() LOGGER.info(cls.cluster.is_shard_aware()) LOGGER.info(cls.cluster.shard_aware_stats()) + @classmethod def teardown_class(cls): cls.cluster.shutdown() @@ -57,6 +51,7 @@ def test_set_keyspace_slow_connection(self): # connections. 
original_set_keyspace_blocking = Connection.set_keyspace_blocking + def patched_set_keyspace_blocking(*args, **kwargs): time.sleep(1) return original_set_keyspace_blocking(*args, **kwargs) From 51b9b5113fe6beb6a81caa141ba8f8c5323957e2 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Tue, 13 Dec 2022 19:08:38 +0200 Subject: [PATCH 081/551] integration-tests: add cluster cleanup code Add code that removes leftover clusters when the testing session is done; in some cases in the testing matrix, leftover clusters were failing the next session of tests. --- tests/integration/conftest.py | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 tests/integration/conftest.py diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py new file mode 100644 index 0000000000..a4e32036a6 --- /dev/null +++ b/tests/integration/conftest.py @@ -0,0 +1,23 @@ +import os +import logging + +import pytest +from ccmlib.cluster_factory import ClusterFactory as CCMClusterFactory + +from . import CLUSTER_NAME, SINGLE_NODE_CLUSTER_NAME, MULTIDC_CLUSTER_NAME +from . import path as ccm_path + + +@pytest.fixture(scope="session", autouse=True) +def cleanup_clusters(): + + yield + + if not os.environ.get('DISABLE_CLUSTER_CLEANUP'): + for cluster_name in [CLUSTER_NAME, SINGLE_NODE_CLUSTER_NAME, MULTIDC_CLUSTER_NAME, 'shared_aware', 'sni_proxy']: + try: + cluster = CCMClusterFactory.load(ccm_path, cluster_name) + logging.debug("Using external CCM cluster {0}".format(cluster.name)) + cluster.clear() + except FileNotFoundError: + pass From 256065a24b4acb85961fa5e964e12e08ea4b1d1a Mon Sep 17 00:00:00 2001 From: David Garcia Date: Tue, 29 Nov 2022 16:14:17 +0000 Subject: [PATCH 082/551] docs: add license notice on every page --- docs/_templates/notice.html | 4 ++++ docs/index.rst | 11 ----------- 2 files changed, 4 insertions(+), 11 deletions(-) create mode 100644 docs/_templates/notice.html diff --git a/docs/_templates/notice.html b/docs/_templates/notice.html new file mode 100644 index 0000000000..1096058571 --- /dev/null +++ b/docs/_templates/notice.html @@ -0,0 +1,4 @@ +
+

© 2013-2017 DataStax

+

© 2016, The Apache Software Foundation. Apache®, Apache Cassandra®, Cassandra®, the Apache feather logo and the Apache Cassandra® Eye logo are either registered trademarks or trademarks of the Apache Software Foundation in the United States and/or other countries. No endorsement by The Apache Software Foundation is implied by the use of these marks.

+
diff --git a/docs/index.rst b/docs/index.rst index fed26e9fc9..f264f92d4a 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -92,14 +92,3 @@ Reporting Issues ---------------- Please report any bugs and make any feature requests on the `Github project issues `_ - - -Copyright ---------- - -© 2013-2017 DataStax - -© 2016, The Apache Software Foundation. -Apache®, Apache Cassandra®, Cassandra®, the Apache feather logo and the Apache Cassandra® Eye logo are either registered trademarks or trademarks of the Apache Software Foundation in the United States and/or other countries. No endorsement by The Apache Software Foundation is implied by the use of these marks. - - From 39810ac7b81d3cf0df02bfd36654b765fac61b6c Mon Sep 17 00:00:00 2001 From: David Garcia Date: Tue, 29 Nov 2022 22:30:58 +0000 Subject: [PATCH 083/551] update notice text --- docs/_templates/notice.html | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/_templates/notice.html b/docs/_templates/notice.html index 1096058571..131c756861 100644 --- a/docs/_templates/notice.html +++ b/docs/_templates/notice.html @@ -1,4 +1,6 @@
-

© 2013-2017 DataStax

-

© 2016, The Apache Software Foundation. Apache®, Apache Cassandra®, Cassandra®, the Apache feather logo and the Apache Cassandra® Eye logo are either registered trademarks or trademarks of the Apache Software Foundation in the United States and/or other countries. No endorsement by The Apache Software Foundation is implied by the use of these marks.

+

+ScyllaDB Python Driver is available under the Apache v2 License. +ScyllaDB Python Driver is a fork from DataStax Python Driver. +See Copyright here.

From 0b21a636bdbedac954f4ec2e5669cafac5bf8a6d Mon Sep 17 00:00:00 2001 From: David Garcia Date: Tue, 29 Nov 2022 22:31:24 +0000 Subject: [PATCH 084/551] Add back copyright --- docs/index.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/index.rst b/docs/index.rst index f264f92d4a..c0e99b0a3c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -92,3 +92,11 @@ Reporting Issues ---------------- Please report any bugs and make any feature requests on the `Github project issues `_ + +Copyright +--------- + +© 2013-2017 DataStax + +© 2016, The Apache Software Foundation. +Apache®, Apache Cassandra®, Cassandra®, the Apache feather logo and the Apache Cassandra® Eye logo are either registered trademarks or trademarks of the Apache Software Foundation in the United States and/or other countries. No endorsement by The Apache Software Foundation is implied by the use of these marks. From 2b412fc7eb0ad726e151b46bfce49aba8d501f31 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Tue, 29 Nov 2022 22:31:55 +0000 Subject: [PATCH 085/551] Update index.rst --- docs/index.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/index.rst b/docs/index.rst index c0e99b0a3c..91a66f7aa5 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -93,6 +93,7 @@ Reporting Issues Please report any bugs and make any feature requests on the `Github project issues `_ + Copyright --------- @@ -100,3 +101,4 @@ Copyright © 2016, The Apache Software Foundation. Apache®, Apache Cassandra®, Cassandra®, the Apache feather logo and the Apache Cassandra® Eye logo are either registered trademarks or trademarks of the Apache Software Foundation in the United States and/or other countries. No endorsement by The Apache Software Foundation is implied by the use of these marks. + From f983d4ab2e96b258dfd6a7de43b598b92d600f37 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Tue, 29 Nov 2022 22:32:10 +0000 Subject: [PATCH 086/551] Update index.rst --- docs/index.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/index.rst b/docs/index.rst index 91a66f7aa5..db6d0880d0 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -101,4 +101,3 @@ Copyright © 2016, The Apache Software Foundation. Apache®, Apache Cassandra®, Cassandra®, the Apache feather logo and the Apache Cassandra® Eye logo are either registered trademarks or trademarks of the Apache Software Foundation in the United States and/or other countries. No endorsement by The Apache Software Foundation is implied by the use of these marks. - From 11ada2961675f1d3df42d325b59c179af0e3992f Mon Sep 17 00:00:00 2001 From: David Garcia Date: Thu, 1 Dec 2022 09:26:56 +0000 Subject: [PATCH 087/551] Update docs/_templates/notice.html Co-authored-by: Tzach Livyatan --- docs/_templates/notice.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/_templates/notice.html b/docs/_templates/notice.html index 131c756861..a47acce544 100644 --- a/docs/_templates/notice.html +++ b/docs/_templates/notice.html @@ -1,6 +1,6 @@

ScyllaDB Python Driver is available under the Apache v2 License. -ScyllaDB Python Driver is a fork from DataStax Python Driver. +ScyllaDB Python Driver is a fork of DataStax Python Driver. See Copyright here.

From 82e8d4b77d0a3e9337650342b3c1deac0c6ab0d8 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Tue, 27 Dec 2022 20:14:55 +0300 Subject: [PATCH 088/551] Properly update an existing host if its IP address changes With the transition to track hosts by host ids, a change of IP address doesn't lead to host removal/addition. So we must properly update an existing host and make sure the old connection pool to this host is destroyed. Add a unit test. Fixes gh-198 --- cassandra/cluster.py | 11 ++++++++- cassandra/metadata.py | 7 ++++++ tests/unit/test_control_connection.py | 33 +++++++++++++++++++++++++-- 3 files changed, 48 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 9fc2042d2d..e37efd792c 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2114,7 +2114,7 @@ def on_remove(self, host): if self.is_shutdown: return - log.debug("Removing host %s", host) + log.debug("[cluster] Removing host %s", host) host.set_down() self.profile_manager.on_remove(host) for session in tuple(self.sessions): @@ -3918,6 +3918,15 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, datacenter = row.get("data_center") rack = row.get("rack") + if host is None: + host = self._cluster.metadata.get_host_by_host_id(host_id) + if host and host.endpoint != endpoint: + log.debug("[control connection] Updating host ip from %s to %s for (%s)", host.endpoint, endpoint, host_id) + old_endpoint = host.endpoint + host.endpoint = endpoint + self._cluster.metadata.update_host(host, old_endpoint) + self._cluster.on_down(host, is_host_addition=False, expect_host_to_be_down=True) + if host is None: log.debug("[control connection] Found new host to connect to: %s", endpoint) host, _ = self._cluster.add_host(endpoint, datacenter=datacenter, rack=rack, signal=True, refresh_nodes=False, host_id=host_id) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 7397365407..5f1cfa5beb 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -365,6 +365,13 @@ def get_host(self, endpoint_or_address, port=None): host_id = self._host_id_by_endpoint.get(endpoint_or_address) return self._hosts.get(host_id) + def get_host_by_host_id(self, host_id): + """ + Same as get_host() but use host_id for lookup. 
+ """ + with self._hosts_lock: + return self._hosts.get(host_id) + def _get_host_by_address(self, address, port=None): for host in six.itervalues(self._hosts): if (host.broadcast_rpc_address == address and diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index f9d2e27c89..a4157fc493 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -62,6 +62,9 @@ def get_host(self, endpoint_or_address, port=None): host_id = self._host_id_by_endpoint.get(endpoint_or_address) return self.hosts.get(host_id) + def get_host_by_host_id(self, host_id): + return self.hosts.get(host_id) + def all_hosts(self): return self.hosts.values() @@ -121,7 +124,7 @@ def remove_host(self, host): def on_up(self, host): pass - def on_down(self, host, is_host_addition): + def on_down(self, host, is_host_addition, expect_host_to_be_down=False): self.down_host = host @@ -327,7 +330,7 @@ def refresh_and_validate_added_hosts(): del self.connection.peer_results[:] self.connection.peer_results.extend([ ["native_address", "native_port", "peer", "peer_port", "schema_version", "data_center", "rack", "tokens", "host_id"], - [["192.168.1.4", 9042, "10.0.0.1", 7042, "a", "dc1", "rack1", ["1", "101", "201"], "uuid6"], + [["192.168.1.4", 9042, "10.0.0.1", 7042, "a", "dc1", "rack1", ["1", "101", "201"], "uuid7"], # all others are invalid [None, 9042, None, 7040, "a", "dc1", "rack1", ["2", "102", "202"], "uuid2"], ["192.168.1.5", 9042, "10.0.0.2", 7040, "a", None, "rack1", ["2", "102", "202"], "uuid2"], @@ -336,6 +339,32 @@ def refresh_and_validate_added_hosts(): ["192.168.1.5", 9042, "10.0.0.2", 7040, "a", "dc1", "rack1", ["2", "102", "202"], None]]]) refresh_and_validate_added_hosts() + def test_change_ip(self): + """ + Tests node IPs are updated while the nodes themselves are not + removed or added when their IPs change (the node look up is based on + host id). 
+ """ + del self.cluster.added_hosts[:] + del self.connection.peer_results[:] + + self.connection.peer_results.extend([ + ["rpc_address", "peer", "schema_version", "data_center", "rack", "tokens", "host_id"], + [["192.168.1.5", "10.0.0.5", "a", "dc1", "rack1", ["2", "102", "202"], 'uuid2'], + ["192.168.1.6", "10.0.0.6", "a", "dc1", "rack1", ["3", "103", "203"], 'uuid3']]]) + self.connection.wait_for_responses = Mock( + return_value=_node_meta_results( + self.connection.local_results, self.connection.peer_results)) + self.control_connection.refresh_node_list_and_token_map() + # all peers are updated + self.assertEqual(0, len(self.cluster.added_hosts)) + + assert self.cluster.metadata.get_host('192.168.1.5') + assert self.cluster.metadata.get_host('192.168.1.6') + + self.assertEqual(3, len(self.cluster.metadata.all_hosts())) + + def test_refresh_nodes_and_tokens_uses_preloaded_results_if_given(self): """ refresh_nodes_and_tokens uses preloaded results if given for shared table queries From c8f7f51c9df93f69a2655f4aa357c492fccf3516 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Wed, 28 Dec 2022 13:36:15 +0200 Subject: [PATCH 089/551] add integration test for ip change case test that change one ip address of a node, while a session is open, and waits for it to be manifested in the metadata, and that the new address is reachable --- .../workflows/integration-tests-python2.yml | 2 +- .github/workflows/integration-tests.yml | 2 +- tests/integration/conftest.py | 3 +- tests/integration/standard/test_ip_change.py | 55 +++++++++++++++++++ 4 files changed, 59 insertions(+), 3 deletions(-) create mode 100644 tests/integration/standard/test_ip_change.py diff --git a/.github/workflows/integration-tests-python2.yml b/.github/workflows/integration-tests-python2.yml index ee2b835b3c..e06e5cb2cd 100644 --- a/.github/workflows/integration-tests-python2.yml +++ b/.github/workflows/integration-tests-python2.yml @@ -21,5 +21,5 @@ jobs: - name: Test with pytest run: | - ./ci/run_integration_test.sh tests/integration/standard/test_authentication.py tests/integration/standard/test_cluster.py tests/integration/standard/test_concurrent.py tests/integration/standard/test_connection.py tests/integration/standard/test_control_connection.py tests/integration/standard/test_custom_payload.py tests/integration/standard/test_custom_protocol_handler.py tests/integration/standard/test_cython_protocol_handlers.py tests/integration/standard/test_scylla_cloud.py tests/integration/standard/test_use_keyspace.py + ./ci/run_integration_test.sh tests/integration/standard/test_authentication.py tests/integration/standard/test_cluster.py tests/integration/standard/test_concurrent.py tests/integration/standard/test_connection.py tests/integration/standard/test_control_connection.py tests/integration/standard/test_custom_payload.py tests/integration/standard/test_custom_protocol_handler.py tests/integration/standard/test_cython_protocol_handlers.py tests/integration/standard/test_scylla_cloud.py tests/integration/standard/test_use_keyspace.py tests/integration/standard/test_ip_change.py # can't run this, cause only 2 cpus on github actions: tests/integration/standard/test_shard_aware.py diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index db8efb3125..669fc582c9 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -21,5 +21,5 @@ jobs: - name: Test with pytest run: | - ./ci/run_integration_test.sh 
tests/integration/standard/test_authentication.py tests/integration/standard/test_cluster.py tests/integration/standard/test_concurrent.py tests/integration/standard/test_connection.py tests/integration/standard/test_control_connection.py tests/integration/standard/test_custom_payload.py tests/integration/standard/test_custom_protocol_handler.py tests/integration/standard/test_cython_protocol_handlers.py tests/integration/standard/test_scylla_cloud.py tests/integration/standard/test_use_keyspace.py + ./ci/run_integration_test.sh tests/integration/standard/test_authentication.py tests/integration/standard/test_cluster.py tests/integration/standard/test_concurrent.py tests/integration/standard/test_connection.py tests/integration/standard/test_control_connection.py tests/integration/standard/test_custom_payload.py tests/integration/standard/test_custom_protocol_handler.py tests/integration/standard/test_cython_protocol_handlers.py tests/integration/standard/test_scylla_cloud.py tests/integration/standard/test_use_keyspace.py tests/integration/standard/test_ip_change.py # can't run this, cause only 2 cpus on github actions: tests/integration/standard/test_shard_aware.py diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index a4e32036a6..93e0a67518 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -14,7 +14,8 @@ def cleanup_clusters(): yield if not os.environ.get('DISABLE_CLUSTER_CLEANUP'): - for cluster_name in [CLUSTER_NAME, SINGLE_NODE_CLUSTER_NAME, MULTIDC_CLUSTER_NAME, 'shared_aware', 'sni_proxy']: + for cluster_name in [CLUSTER_NAME, SINGLE_NODE_CLUSTER_NAME, MULTIDC_CLUSTER_NAME, + 'shared_aware', 'sni_proxy', 'test_ip_change']: try: cluster = CCMClusterFactory.load(ccm_path, cluster_name) logging.debug("Using external CCM cluster {0}".format(cluster.name)) diff --git a/tests/integration/standard/test_ip_change.py b/tests/integration/standard/test_ip_change.py new file mode 100644 index 0000000000..a564d5b4af --- /dev/null +++ b/tests/integration/standard/test_ip_change.py @@ -0,0 +1,55 @@ +import os +import logging +import unittest + +from cassandra.cluster import ExecutionProfile +from cassandra.policies import WhiteListRoundRobinPolicy, ConstantReconnectionPolicy + +from tests.integration import use_cluster, get_node, get_cluster, local, TestCluster +from tests.util import wait_until_not_raised + +LOGGER = logging.getLogger(__name__) + + +def setup_module(): + os.environ['SCYLLA_EXT_OPTS'] = "--smp 2 --memory 2048M" + use_cluster('test_ip_change', [3], start=True) + + +@local +class TestIpAddressChange(unittest.TestCase): + @classmethod + def setup_class(cls): + cls.cluster = TestCluster(reconnection_policy=ConstantReconnectionPolicy(1)) + cls.session = cls.cluster.connect() + + @classmethod + def teardown_class(cls): + cls.cluster.shutdown() + + def test_change_address_during_live_session(self): + node3 = get_node(3) + + LOGGER.debug("Stop node3") + node3.stop() + + LOGGER.debug("Change IP address for node3") + ip_prefix = get_cluster().get_ipprefix() + new_ip = f'{ip_prefix}33' + node3.set_configuration_options(values={'listen_address': new_ip, 'rpc_address': new_ip, 'api_address': new_ip}) + node3.network_interfaces = {k: (new_ip, v[1]) for k, v in node3.network_interfaces.items()} + LOGGER.debug(f"Start node3 again with ip address {new_ip}") + node3.start(wait_for_binary_proto=True) + + def new_address_found(): + addresses = [host.endpoint.address for host in self.cluster.metadata.all_hosts()] + LOGGER.debug(addresses) + 
assert new_ip in addresses + + wait_until_not_raised(new_address_found, 0.5, 100) + + new_node_only = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy([new_ip])) + self.cluster.add_execution_profile("new_node", new_node_only) + local_info = self.session.execute("SELECT * FROM system.local", execution_profile="new_node").one() + LOGGER.debug(local_info._asdict()) + assert local_info.broadcast_address == new_ip From 2b0aac549512bf00730ee2dffb7a3e0f41cd5833 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Wed, 28 Dec 2022 21:52:04 +0200 Subject: [PATCH 090/551] test_scylla_cloud.py: add cleanup to clusters seem like we are not calling `cluster.shutdown()` to clusters the test creates --- tests/integration/standard/test_ip_change.py | 18 ++++++++----- .../integration/standard/test_scylla_cloud.py | 26 ++++++++++++------- 2 files changed, 27 insertions(+), 17 deletions(-) diff --git a/tests/integration/standard/test_ip_change.py b/tests/integration/standard/test_ip_change.py index a564d5b4af..e87c14a1df 100644 --- a/tests/integration/standard/test_ip_change.py +++ b/tests/integration/standard/test_ip_change.py @@ -3,7 +3,7 @@ import unittest from cassandra.cluster import ExecutionProfile -from cassandra.policies import WhiteListRoundRobinPolicy, ConstantReconnectionPolicy +from cassandra.policies import WhiteListRoundRobinPolicy from tests.integration import use_cluster, get_node, get_cluster, local, TestCluster from tests.util import wait_until_not_raised @@ -15,12 +15,11 @@ def setup_module(): os.environ['SCYLLA_EXT_OPTS'] = "--smp 2 --memory 2048M" use_cluster('test_ip_change', [3], start=True) - @local class TestIpAddressChange(unittest.TestCase): @classmethod def setup_class(cls): - cls.cluster = TestCluster(reconnection_policy=ConstantReconnectionPolicy(1)) + cls.cluster = TestCluster() cls.session = cls.cluster.connect() @classmethod @@ -42,7 +41,7 @@ def test_change_address_during_live_session(self): node3.start(wait_for_binary_proto=True) def new_address_found(): - addresses = [host.endpoint.address for host in self.cluster.metadata.all_hosts()] + addresses = [str(host.endpoint.address) for host in self.cluster.metadata.all_hosts()] LOGGER.debug(addresses) assert new_ip in addresses @@ -50,6 +49,11 @@ def new_address_found(): new_node_only = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy([new_ip])) self.cluster.add_execution_profile("new_node", new_node_only) - local_info = self.session.execute("SELECT * FROM system.local", execution_profile="new_node").one() - LOGGER.debug(local_info._asdict()) - assert local_info.broadcast_address == new_ip + + def new_node_connectable(): + LOGGER.info(self.cluster.shard_aware_stats()) + local_info = self.session.execute("SELECT * FROM system.local", execution_profile="new_node").one() + LOGGER.debug(local_info._asdict()) + assert local_info.broadcast_address == new_ip + + wait_until_not_raised(new_node_connectable, 0.5, 100) diff --git a/tests/integration/standard/test_scylla_cloud.py b/tests/integration/standard/test_scylla_cloud.py index 2106407ebf..422a66f318 100644 --- a/tests/integration/standard/test_scylla_cloud.py +++ b/tests/integration/standard/test_scylla_cloud.py @@ -57,12 +57,15 @@ def test_1_node_cluster(self): for connection_class in supported_connection_classes: logging.warning('testing with class: %s', connection_class.__name__) cluster = Cluster(scylla_cloud=config, connection_class=connection_class) - with cluster.connect() as session: - res = session.execute("SELECT * FROM system.local") - 
assert res.all() + try: + with cluster.connect() as session: + res = session.execute("SELECT * FROM system.local") + assert res.all() - assert len(cluster.metadata._hosts) == 1 - assert len(cluster.metadata._host_id_by_endpoint) == 1 + assert len(cluster.metadata._hosts) == 1 + assert len(cluster.metadata._host_id_by_endpoint) == 1 + finally: + cluster.shutdown() def test_3_node_cluster(self): self.ccm_cluster = use_cluster("sni_proxy", [3], start=False) @@ -72,8 +75,11 @@ def test_3_node_cluster(self): for connection_class in supported_connection_classes: logging.warning('testing with class: %s', connection_class.__name__) cluster = Cluster(scylla_cloud=config, connection_class=connection_class) - with cluster.connect() as session: - res = session.execute("SELECT * FROM system.local") - assert res.all() - assert len(cluster.metadata._hosts) == 3 - assert len(cluster.metadata._host_id_by_endpoint) == 3 + try: + with cluster.connect() as session: + res = session.execute("SELECT * FROM system.local") + assert res.all() + assert len(cluster.metadata._hosts) == 3 + assert len(cluster.metadata._host_id_by_endpoint) == 3 + finally: + cluster.shutdown() From 2e8b0d899d731284c7efb3e4721755e73e8ab635 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Fri, 30 Dec 2022 02:07:03 +0200 Subject: [PATCH 091/551] ip changes: cancel the ongoing reconnector if we don't cancel it, we can run into a case that it's currently running, and our replacement would just update the host, but the reconnector would keep trying to reconnect the old address. Ref: #199 --- cassandra/cluster.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index e37efd792c..d2acc7c9ee 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3925,6 +3925,9 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, old_endpoint = host.endpoint host.endpoint = endpoint self._cluster.metadata.update_host(host, old_endpoint) + reconnector = host.get_and_set_reconnection_handler(None) + if reconnector: + reconnector.cancel() self._cluster.on_down(host, is_host_addition=False, expect_host_to_be_down=True) if host is None: From ec0aab15b9b28cbb2a997152142ca95545591567 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Mon, 2 Jan 2023 17:24:29 +0200 Subject: [PATCH 092/551] Release 3.25.11 --- cassandra/__init__.py | 2 +- docs/conf.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 94de644dd8..7878369210 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 25, 10) +__version_info__ = (3, 25, 11) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/conf.py b/docs/conf.py index ebe4acc6f6..d1bbb5ba33 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -11,10 +11,10 @@ # -- General configuration ----------------------------------------------------- # Build documentation for the following tags and branches -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.10-scylla'] +TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla'] BRANCHES = ['master'] # Set the latest version. -LATEST_VERSION = '3.25.10-scylla' +LATEST_VERSION = '3.25.11-scylla' # Set which versions are not released yet. 
UNSTABLE_VERSIONS = ['master'] # Set which versions are deprecated From dee95953b070b2f72ab564f48236466a3564aaba Mon Sep 17 00:00:00 2001 From: Stefano Rivera Date: Wed, 4 Jan 2023 21:55:21 -0800 Subject: [PATCH 093/551] HostFilterPolicyInitTest fix for Python 3.11 (#1131) The AttributeError message for a missing property setter changed in bpo-46730 (https://bugs.python.org/issue46730) --- tests/unit/test_policies.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index a31b4f4c1b..88db23daba 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -1295,7 +1295,10 @@ def test_init_kwargs(self): )) def test_immutable_predicate(self): - expected_message_regex = "can't set attribute" + if sys.version_info >= (3, 11): + expected_message_regex = "has no setter" + else: + expected_message_regex = "can't set attribute" hfp = HostFilterPolicy(child_policy=Mock(name='child_policy'), predicate=Mock(name='predicate')) with self.assertRaisesRegexp(AttributeError, expected_message_regex): From 9164c3e906ccd16695b8bccc30c80349efc58f3f Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Wed, 11 Jan 2023 16:28:57 +0200 Subject: [PATCH 094/551] github actions: stop uploading to pypi from python2 flow cause of that upload the main py3 flow is marked as failed, since one of the artifacts is already uploaded by python2 --- .github/workflows/test-python2.yaml | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/.github/workflows/test-python2.yaml b/.github/workflows/test-python2.yaml index 532f02d084..da51f8e169 100644 --- a/.github/workflows/test-python2.yaml +++ b/.github/workflows/test-python2.yaml @@ -38,22 +38,3 @@ jobs: - uses: actions/upload-artifact@v2 with: path: dist/*.tar.gz - - upload_pypi: - needs: [build, test] - runs-on: ubuntu-20.04 - # upload to PyPI on every tag starting with 'v' - if: github.event_name == 'push' && endsWith(github.event.ref, 'scylla') - # alternatively, to publish when a GitHub Release is created, use the following rule: - # if: github.event_name == 'release' && github.event.action == 'published' - steps: - - uses: actions/download-artifact@v2 - with: - name: artifact - path: dist - - - uses: pypa/gh-action-pypi-publish@master - with: - user: __token__ - password: ${{ secrets.PYPI_API_TOKEN }} - From 033f594d69397220f5e451d9bed58ecad5a9b549 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Wed, 18 Jan 2023 11:51:15 +0100 Subject: [PATCH 095/551] Fix CI failures CI was failing because of this error: https://github.com/eventlet/eventlet/issues/781 This commit applies the recommended fix - moving to eventlet>=0.33.3 --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 3c1382debe..887af99f9d 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -9,7 +9,7 @@ twisted[tls]; python_version >= '3.5' or python_version < '3.0' twisted[tls]==19.2.1; python_version < '3.5' and python_version >= '3.0' gevent>=1.0; platform_machine != 'i686' and platform_machine != 'win32' gevent==20.5.0; platform_machine == 'i686' or platform_machine == 'win32' -eventlet +eventlet>=0.33.3 cython>=0.20,<0.30 ; python_version > '3.0' cython==0.23.1 ; python_version < '3.0' packaging From da026e78361963f363c28638547179e24c89935d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomek=20=C5=81asica?= Date: Tue, 24 Jan 2023 21:11:52 +0100 Subject: [PATCH 096/551] Handle "log gone" 
case in the end of _run_loop (#1133) If log is somehow gone and file exception due to the race mention in PYTHON-1266 it will also inevitably fail for the same reason after the loop so we need to catch the exception there as well. --- cassandra/io/asyncorereactor.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 681552e589..074c62f690 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -259,7 +259,13 @@ def _run_loop(self): break self._started = False - log.debug("Asyncore event loop ended") + try: + log.debug("Asyncore event loop ended") + except Exception: + # TODO: Remove when Python 2 support is removed + # PYTHON-1266. If our logger has disappeared, there's nothing we + # can do, so just log nothing. + pass def add_timer(self, timer): self._timers.add_timer(timer) From 3638de4ae878109c691e61742d6e8ba5d0a98ebf Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Tue, 24 Jan 2023 14:25:14 -0600 Subject: [PATCH 097/551] Minor refactor of prior commit --- cassandra/io/asyncorereactor.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 074c62f690..0abdbbfe0a 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -248,24 +248,21 @@ def _run_loop(self): try: self._loop_dispatcher.loop(self.timer_resolution) self._timers.service_timeouts() - except Exception: - try: - log.debug("Asyncore event loop stopped unexpectedly", exc_info=True) - except Exception: - # TODO: Remove when Python 2 support is removed - # PYTHON-1266. If our logger has disappeared, there's nothing we - # can do, so just log nothing. - pass + except Exception as exc: + self._maybe_log_debug("Asyncore event loop stopped unexpectedly", exc_info=exc) break self._started = False + self._maybe_log_debug("Asyncore event loop ended") + + def _maybe_log_debug(self, *args, **kwargs): try: - log.debug("Asyncore event loop ended") + log.debug(*args, **kwargs) except Exception: # TODO: Remove when Python 2 support is removed # PYTHON-1266. If our logger has disappeared, there's nothing we # can do, so just log nothing. - pass + pass def add_timer(self, timer): self._timers.add_timer(timer) From 1e9a3833d1932947861aa4b530d1be9cb8821e11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Wed, 25 Jan 2023 15:05:39 +0100 Subject: [PATCH 098/551] gitignore: add "venv" to ignored dirs This is very common name for Python's virtual environments, thus used commonly during development. Having it not ignored makes VSCode unable to process git changes in the project. --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index d2e5116b32..4541d034f0 100644 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,7 @@ docs/poetry.lock tests/integration/ccm setuptools*.tar.gz setuptools*.egg +venv/ cassandra/*.c !cassandra/cmurmur3.c From 4bbfd2d2bd439c682361875302ccdb2a571121c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Wed, 25 Jan 2023 15:16:48 +0100 Subject: [PATCH 099/551] tests: Don't add StreamHandler to logger Adding this handler causes at least 2 problems: - pytest captures logging calls by itself, so every log is captured twice - once in stderr, once in stdlog - logging calls in atexit handlers fail - as they are trying to write to a closed stream. 
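As a rough illustration (ours, not part of the patch), the duplicate capture comes from attaching a second sink alongside the one pytest already provides:

    import logging

    # Hypothetical repro: pytest captures log records itself, so an extra
    # root-level StreamHandler emits every record a second time, and it holds
    # a stream reference that can already be closed when atexit hooks log.
    log = logging.getLogger()
    log.addHandler(logging.StreamHandler())

    log.warning("appears once on stderr and once in pytest's captured log")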
Original purpose of the code seems to be to allow log capture for nose, but we no longer use nose. It also specified log level and format - but those can be specified using pytest.ini (which this commit also adds), so the code can be removed now. --- pytest.ini | 4 ++++ tests/__init__.py | 7 ------- 2 files changed, 4 insertions(+), 7 deletions(-) create mode 100644 pytest.ini diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000000..70ce703622 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,4 @@ +[pytest] +log_format = %(asctime)s.%(msecs)03d %(levelname)s [%(module)s:%(lineno)s]: %(message)s +log_level = DEBUG +log_date_format = %Y-%m-%d %H:%M:%S \ No newline at end of file diff --git a/tests/__init__.py b/tests/__init__.py index 6d75a9d907..6ebce1d711 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -21,13 +21,6 @@ from concurrent.futures import ThreadPoolExecutor log = logging.getLogger() -log.setLevel('DEBUG') -# if nose didn't already attach a log handler, add one here -if not log.handlers: - handler = logging.StreamHandler() - handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s [%(module)s:%(lineno)s]: %(message)s')) - log.addHandler(handler) - def is_eventlet_monkey_patched(): if 'eventlet.patcher' not in sys.modules: From cd1f5ed744ca5719c7b6fdb40bb8f830a486443a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Wed, 25 Jan 2023 15:21:52 +0100 Subject: [PATCH 100/551] tests: Ensure additional log handlers are removed In some places handlers added using logger.addHandler were not removed. Some tests manually added MockLoggingHandler instead of using `with` . This commit fixes those problems. --- tests/integration/__init__.py | 2 +- .../cqlengine/management/test_management.py | 21 +++++------ tests/integration/simulacron/test_cluster.py | 37 +++++++++---------- tests/integration/standard/test_query.py | 36 +++++++++--------- tests/integration/upgrade/__init__.py | 4 ++ 5 files changed, 50 insertions(+), 50 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index ef31ebdd33..b0ff9f8d8d 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -896,7 +896,7 @@ def __enter__(self): return self def __exit__(self, *args): - pass + self.logger.removeHandler(self) class BasicExistingKeyspaceUnitTestCase(BasicKeyspaceUnitTestCase): diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index f37db5e51f..27f735027c 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -360,18 +360,15 @@ def test_sync_warnings(self): @test_category object_mapper """ - mock_handler = MockLoggingHandler() - logger = logging.getLogger(management.__name__) - logger.addHandler(mock_handler) - sync_table(BaseInconsistent) - sync_table(ChangedInconsistent) - self.assertTrue('differing from the model type' in mock_handler.messages.get('warning')[0]) - if CASSANDRA_VERSION >= Version('2.1'): - sync_type(DEFAULT_KEYSPACE, BaseInconsistentType) - mock_handler.reset() - sync_type(DEFAULT_KEYSPACE, ChangedInconsistentType) - self.assertTrue('differing from the model user type' in mock_handler.messages.get('warning')[0]) - logger.removeHandler(mock_handler) + with MockLoggingHandler().set_module_name(management.__name__) as mock_handler: + sync_table(BaseInconsistent) + sync_table(ChangedInconsistent) + self.assertTrue('differing from the model type' in 
mock_handler.messages.get('warning')[0]) + if CASSANDRA_VERSION >= Version('2.1'): + sync_type(DEFAULT_KEYSPACE, BaseInconsistentType) + mock_handler.reset() + sync_type(DEFAULT_KEYSPACE, ChangedInconsistentType) + self.assertTrue('differing from the model user type' in mock_handler.messages.get('warning')[0]) class TestIndexSetModel(Model): diff --git a/tests/integration/simulacron/test_cluster.py b/tests/integration/simulacron/test_cluster.py index f859a5dd05..dfbf6c0ec6 100644 --- a/tests/integration/simulacron/test_cluster.py +++ b/tests/integration/simulacron/test_cluster.py @@ -88,23 +88,20 @@ class DuplicateRpcTest(SimulacronCluster): connect = False def test_duplicate(self): - mock_handler = MockLoggingHandler() - logger = logging.getLogger(cassandra.cluster.__name__) - logger.addHandler(mock_handler) - address_column = "native_transport_address" if DSE_VERSION and DSE_VERSION > Version("6.0") else "rpc_address" - rows = [ - {"peer": "127.0.0.1", "data_center": "dc", "host_id": "dontcare1", "rack": "rack1", - "release_version": "3.11.4", address_column: "127.0.0.1", "schema_version": "dontcare", "tokens": "1"}, - {"peer": "127.0.0.2", "data_center": "dc", "host_id": "dontcare2", "rack": "rack1", - "release_version": "3.11.4", address_column: "127.0.0.2", "schema_version": "dontcare", "tokens": "2"}, - ] - prime_query(ControlConnection._SELECT_PEERS, rows=rows) - - cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) - session = cluster.connect(wait_for_all_pools=True) - - warnings = mock_handler.messages.get("warning") - self.assertEqual(len(warnings), 1) - self.assertTrue('multiple hosts with the same endpoint' in warnings[0]) - logger.removeHandler(mock_handler) - cluster.shutdown() + with MockLoggingHandler().set_module_name(cassandra.cluster.__name__) as mock_handler: + address_column = "native_transport_address" if DSE_VERSION and DSE_VERSION > Version("6.0") else "rpc_address" + rows = [ + {"peer": "127.0.0.1", "data_center": "dc", "host_id": "dontcare1", "rack": "rack1", + "release_version": "3.11.4", address_column: "127.0.0.1", "schema_version": "dontcare", "tokens": "1"}, + {"peer": "127.0.0.2", "data_center": "dc", "host_id": "dontcare2", "rack": "rack1", + "release_version": "3.11.4", address_column: "127.0.0.2", "schema_version": "dontcare", "tokens": "2"}, + ] + prime_query(ControlConnection._SELECT_PEERS, rows=rows) + + cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False) + session = cluster.connect(wait_for_all_pools=True) + + warnings = mock_handler.messages.get("warning") + self.assertEqual(len(warnings), 1) + self.assertTrue('multiple hosts with the same endpoint' in warnings[0]) + cluster.shutdown() diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 70037f60d5..7eb4cd39c7 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -508,6 +508,9 @@ def setUp(self): self.mock_handler = MockLoggingHandler() logger = logging.getLogger(cluster.__name__) logger.addHandler(self.mock_handler) + + def tearDown(self): + logger.removeHandler(self.mock_handler) def test_prepare_on_all_hosts(self): """ @@ -1562,28 +1565,27 @@ def test_reprepare_after_host_is_down(self): @test_category query """ - mock_handler = MockLoggingHandler() - logger = logging.getLogger(cluster.__name__) - logger.addHandler(mock_handler) - get_node(1).stop(wait=True, gently=True, wait_other_notice=True) + with MockLoggingHandler().set_module_name(cluster.__name__) as 
mock_handler: + get_node(1).stop(wait=True, gently=True, wait_other_notice=True) - only_first = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(["127.0.0.1"])) - self.cluster.add_execution_profile("only_first", only_first) + only_first = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(["127.0.0.1"])) + self.cluster.add_execution_profile("only_first", only_first) - query = "SELECT v from {} WHERE k = ?".format(self.table_name) - prepared_statement = self.session.prepare(query, keyspace=self.ks_name) - prepared_statement_alternative = self.session.prepare(query, keyspace=self.alternative_ks) + query = "SELECT v from {} WHERE k = ?".format(self.table_name) + prepared_statement = self.session.prepare(query, keyspace=self.ks_name) + prepared_statement_alternative = self.session.prepare(query, keyspace=self.alternative_ks) - get_node(1).start(wait_for_binary_proto=True, wait_other_notice=True) + get_node(1).start(wait_for_binary_proto=True, wait_other_notice=True) - # We wait for cluster._prepare_all_queries to be called - time.sleep(5) - self.assertEqual(1, mock_handler.get_message_count('debug', 'Preparing all known prepared statements')) - results = self.session.execute(prepared_statement, (1,), execution_profile="only_first") - self.assertEqual(results[0], (1, )) + # We wait for cluster._prepare_all_queries to be called + time.sleep(5) + self.assertEqual(1, mock_handler.get_message_count('debug', 'Preparing all known prepared statements')) + + results = self.session.execute(prepared_statement, (1,), execution_profile="only_first") + self.assertEqual(results[0], (1, )) - results = self.session.execute(prepared_statement_alternative, (2,), execution_profile="only_first") - self.assertEqual(results[0], (2, )) + results = self.session.execute(prepared_statement_alternative, (2,), execution_profile="only_first") + self.assertEqual(results[0], (2, )) def test_prepared_not_found(self): """ diff --git a/tests/integration/upgrade/__init__.py b/tests/integration/upgrade/__init__.py index e307a3e3cc..a906f60566 100644 --- a/tests/integration/upgrade/__init__.py +++ b/tests/integration/upgrade/__init__.py @@ -78,6 +78,10 @@ def setUpClass(cls): cls.logger_handler = MockLoggingHandler() logger = logging.getLogger(cluster.__name__) logger.addHandler(cls.logger_handler) + + @classmethod + def tearDownClass(cls): + logger.removeHandler(cls.logger_handler) def _upgrade_step_setup(self): """ From bf5db5158e13467092193b345d419c2b327fc6d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Fri, 27 Jan 2023 12:44:34 +0100 Subject: [PATCH 101/551] tests/integration/cqlengine: move from nose to pytest We use pytest to run our tests, but cqlenginge inegration tests were using nose's setup_package / teardown_package functions which are not supported by pytest. This causes most of those tests to fail. This commit changes this to pytest's autouse fixture, increasing amount of passing tests in integration/cqlengine from 185 to 516. --- tests/integration/cqlengine/__init__.py | 18 +-------- tests/integration/cqlengine/conftest.py | 54 +++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 17 deletions(-) create mode 100644 tests/integration/cqlengine/conftest.py diff --git a/tests/integration/cqlengine/__init__.py b/tests/integration/cqlengine/__init__.py index cd8f031ed1..5b7d16c535 100644 --- a/tests/integration/cqlengine/__init__.py +++ b/tests/integration/cqlengine/__init__.py @@ -13,35 +13,19 @@ # limitations under the License. 
import os -import warnings import unittest from cassandra import ConsistencyLevel from cassandra.cqlengine import connection -from cassandra.cqlengine.management import create_keyspace_simple, drop_keyspace, CQLENG_ALLOW_SCHEMA_MANAGEMENT import cassandra -from tests.integration import get_server_versions, use_single_node, PROTOCOL_VERSION, CASSANDRA_IP, ALLOW_BETA_PROTOCOL +from tests.integration import get_server_versions, PROTOCOL_VERSION, CASSANDRA_IP, ALLOW_BETA_PROTOCOL DEFAULT_KEYSPACE = 'cqlengine_test' CQL_SKIP_EXECUTE = bool(os.getenv('CQL_SKIP_EXECUTE', False)) - -def setup_package(): - warnings.simplefilter('always') # for testing warnings, make sure all are let through - os.environ[CQLENG_ALLOW_SCHEMA_MANAGEMENT] = '1' - - use_single_node() - - setup_connection(DEFAULT_KEYSPACE) - create_keyspace_simple(DEFAULT_KEYSPACE, 1) - - -def teardown_package(): - connection.unregister_connection("default") - def is_prepend_reversed(): # do we have https://issues.apache.org/jira/browse/CASSANDRA-8733 ? ver, _ = get_server_versions() diff --git a/tests/integration/cqlengine/conftest.py b/tests/integration/cqlengine/conftest.py new file mode 100644 index 0000000000..b802d5f3d0 --- /dev/null +++ b/tests/integration/cqlengine/conftest.py @@ -0,0 +1,54 @@ +# Copyright ScyllaDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Copyright DataStax, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +import os + +import pytest + +from cassandra.cqlengine import connection +from cassandra.cqlengine.management import create_keyspace_simple, drop_keyspace, CQLENG_ALLOW_SCHEMA_MANAGEMENT +from tests.integration import use_single_node + +from . import setup_connection, DEFAULT_KEYSPACE + + +@pytest.fixture(scope='package', autouse=True) +def cqlengine_fixture(): + warnings.simplefilter('always') # for testing warnings, make sure all are let through + os.environ[CQLENG_ALLOW_SCHEMA_MANAGEMENT] = '1' + + use_single_node() + + setup_connection(DEFAULT_KEYSPACE) + create_keyspace_simple(DEFAULT_KEYSPACE, 1) + + yield + + drop_keyspace(DEFAULT_KEYSPACE) + connection.unregister_connection("default") From 506fbf4c9b453605a65fca0344bbf6673be73403 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Mon, 6 Feb 2023 15:41:12 +0100 Subject: [PATCH 102/551] tests/integration: Mark tests requiring collection indexes Some integration tests use indexes on non-frozen collections. Support for those was merged to Scylla, but will be available from Scylla 5.2. 
Mark the tests that require such indexes, so they are not run with Scylla < 5.2. --- tests/integration/__init__.py | 4 ++++ .../cqlengine/management/test_management.py | 3 ++- tests/integration/cqlengine/query/test_named.py | 4 ++-- .../integration/cqlengine/query/test_queryset.py | 16 +++++++++++++--- 4 files changed, 21 insertions(+), 6 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index b0ff9f8d8d..7d6c1750ef 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -337,6 +337,7 @@ def _id_and_mark(f): lessthenprotocolv4 = unittest.skipUnless(PROTOCOL_VERSION < 4, 'Protocol versions 4 or greater not supported') greaterthanprotocolv3 = unittest.skipUnless(PROTOCOL_VERSION >= 4, 'Protocol versions less than 4 are not supported') protocolv6 = unittest.skipUnless(6 in get_supported_protocol_versions(), 'Protocol versions less than 6 are not supported') + greaterthancass20 = unittest.skipUnless(CASSANDRA_VERSION >= Version('2.1'), 'Cassandra version 2.1 or greater required') greaterthancass21 = unittest.skipUnless(CASSANDRA_VERSION >= Version('2.2'), 'Cassandra version 2.2 or greater required') greaterthanorequalcass30 = unittest.skipUnless(CASSANDRA_VERSION >= Version('3.0'), 'Cassandra version 3.0 or greater required') @@ -348,6 +349,7 @@ def _id_and_mark(f): lessthanorequalcass40 = unittest.skipUnless(CASSANDRA_VERSION <= Version('4.0-a'), 'Cassandra version less or equal to 4.0 required') lessthancass40 = unittest.skipUnless(CASSANDRA_VERSION < Version('4.0-a'), 'Cassandra version less than 4.0 required') lessthancass30 = unittest.skipUnless(CASSANDRA_VERSION < Version('3.0'), 'Cassandra version less then 3.0 required') + greaterthanorequaldse68 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('6.8'), "DSE 6.8 or greater required for this test") greaterthanorequaldse67 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('6.7'), "DSE 6.7 or greater required for this test") greaterthanorequaldse60 = unittest.skipUnless(DSE_VERSION and DSE_VERSION >= Version('6.0'), "DSE 6.0 or greater required for this test") @@ -356,6 +358,8 @@ def _id_and_mark(f): lessthandse51 = unittest.skipUnless(DSE_VERSION and DSE_VERSION < Version('5.1'), "DSE version less than 5.1 required") lessthandse60 = unittest.skipUnless(DSE_VERSION and DSE_VERSION < Version('6.0'), "DSE version less than 6.0 required") +requirescollectionindexes = unittest.skipUnless(SCYLLA_VERSION is None or Version(SCYLLA_VERSION.split(':')[1]) >= Version('5.2'), 'Test requires Scylla >= 5.2 or Cassandra') + pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") notpy3 = unittest.skipIf(sys.version_info >= (3, 0), "Test not applicable for Python 3.x runtime") requiresmallclockgranularity = unittest.skipIf("Windows" in platform.system() or "asyncore" in EVENT_LOOP_MANAGER, diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 27f735027c..22c8e7f099 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -23,7 +23,7 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns -from tests.integration import DSE_VERSION, PROTOCOL_VERSION, greaterthancass20, MockLoggingHandler, CASSANDRA_VERSION +from tests.integration import DSE_VERSION, PROTOCOL_VERSION, greaterthancass20, requirescollectionindexes, 
MockLoggingHandler, CASSANDRA_VERSION from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine.query.test_queryset import TestModel from cassandra.cqlengine.usertype import UserType @@ -426,6 +426,7 @@ def test_sync_index_case_sensitive(self): self.assertIsNotNone(management._get_index_name_by_column(table_meta, 'second_key')) @greaterthancass20 + @requirescollectionindexes def test_sync_indexed_set(self): """ Tests that models that have container types with indices can be synced. diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index eb85bbbb85..9dee3055cd 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -27,7 +27,7 @@ from tests.integration.cqlengine.query.test_queryset import BaseQuerySetUsage -from tests.integration import BasicSharedKeyspaceUnitTestCase, greaterthanorequalcass30 +from tests.integration import BasicSharedKeyspaceUnitTestCase, greaterthanorequalcass30, requirescollectionindexes class TestQuerySetOperation(BaseCassEngTestCase): @@ -118,7 +118,7 @@ def test_query_expression_where_clause_generation(self): self.assertIsInstance(where.operator, GreaterThanOrEqualOperator) self.assertEqual(where.value, 1) - +@requirescollectionindexes class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage): @classmethod diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index ec5044b707..4901f011f5 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -39,7 +39,7 @@ from cassandra.util import uuid_from_time from cassandra.cqlengine.connection import get_session from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION, greaterthancass20, greaterthancass21, \ - greaterthanorequalcass30, TestCluster + greaterthanorequalcass30, TestCluster, requirescollectionindexes from tests.integration.cqlengine import execute_count, DEFAULT_KEYSPACE @@ -384,7 +384,7 @@ def tearDownClass(cls): drop_table(CustomIndexedTestModel) drop_table(TestMultiClusteringModel) - +@requirescollectionindexes class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage): @execute_count(2) @@ -558,7 +558,7 @@ class NonEqualityFilteringModel(Model): num = qa.count() assert num == 1, num - +@requirescollectionindexes class TestQuerySetDistinct(BaseQuerySetUsage): @execute_count(1) @@ -597,6 +597,7 @@ def test_distinct_with_explicit_count(self): self.assertEqual(q.count(), 2) +@requirescollectionindexes class TestQuerySetOrdering(BaseQuerySetUsage): @execute_count(2) def test_order_by_success_case(self): @@ -645,6 +646,7 @@ def test_ordering_on_multiple_clustering_columns(self): assert [r.three for r in results] == [1, 2, 3, 4, 5] +@requirescollectionindexes class TestQuerySetSlicing(BaseQuerySetUsage): @execute_count(1) @@ -699,6 +701,7 @@ def test_negative_slicing(self): self.assertEqual(model.attempt_id, expect) +@requirescollectionindexes class TestQuerySetValidation(BaseQuerySetUsage): def test_primary_key_or_index_must_be_specified(self): @@ -780,6 +783,7 @@ def test_custom_indexed_field_can_be_queried(self): list(CustomIndexedTestModel.objects.filter(test_id=1, description='test')) +@requirescollectionindexes class TestQuerySetDelete(BaseQuerySetUsage): @execute_count(9) @@ -938,6 +942,7 @@ def test_success_case(self): assert '4' in datas +@requirescollectionindexes class 
TestInOperator(BaseQuerySetUsage): @execute_count(1) def test_kwarg_success_case(self): @@ -998,6 +1003,7 @@ class bool_model2(Model): @greaterthancass20 +@requirescollectionindexes class TestContainsOperator(BaseQuerySetUsage): @execute_count(6) @@ -1063,6 +1069,7 @@ def test_query_expression_success_case(self): self.assertEqual(q.count(), 0) +@requirescollectionindexes class TestValuesList(BaseQuerySetUsage): @execute_count(2) @@ -1075,6 +1082,7 @@ def test_values_list(self): assert item == 10 +@requirescollectionindexes class TestObjectsProperty(BaseQuerySetUsage): @execute_count(1) def test_objects_property_returns_fresh_queryset(self): @@ -1105,6 +1113,7 @@ class PagingTest(Model): assert len(results) == 2 +@requirescollectionindexes class ModelQuerySetTimeoutTestCase(BaseQuerySetUsage): def test_default_timeout(self): with mock.patch.object(Session, 'execute') as mock_execute: @@ -1122,6 +1131,7 @@ def test_none_timeout(self): self.assertEqual(mock_execute.call_args[-1]['timeout'], None) +@requirescollectionindexes class DMLQueryTimeoutTestCase(BaseQuerySetUsage): def setUp(self): self.model = TestModel(test_id=1, attempt_id=1, description='timeout test') From b54f3b4a3626952439cfdd22c205f17894ebb287 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Mon, 6 Feb 2023 15:51:40 +0100 Subject: [PATCH 103/551] tests/integration: Allow other columns than '[applied]' in LWT Scylla returns not only `[applied]` column, but also the previous value of a row. Tests didn't allow it - this commit fixes the problem. --- tests/integration/cqlengine/test_ifexists.py | 24 +++++--------------- 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/tests/integration/cqlengine/test_ifexists.py b/tests/integration/cqlengine/test_ifexists.py index 1189bc0ff5..2e9d4be7ed 100644 --- a/tests/integration/cqlengine/test_ifexists.py +++ b/tests/integration/cqlengine/test_ifexists.py @@ -105,17 +105,13 @@ def test_update_if_exists(self): with self.assertRaises(LWTException) as assertion: m.if_exists().update() - self.assertEqual(assertion.exception.existing, { - '[applied]': False, - }) + self.assertEqual(assertion.exception.existing.get('[applied]'), False) # queryset update with self.assertRaises(LWTException) as assertion: TestIfExistsModel.objects(id=uuid4()).if_exists().update(count=8) - self.assertEqual(assertion.exception.existing, { - '[applied]': False, - }) + self.assertEqual(assertion.exception.existing.get('[applied]'), False) @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_batch_update_if_exists_success(self): @@ -142,9 +138,7 @@ def test_batch_update_if_exists_success(self): m = TestIfExistsModel(id=uuid4(), count=42) # Doesn't exist m.batch(b).if_exists().update() - self.assertEqual(assertion.exception.existing, { - '[applied]': False, - }) + self.assertEqual(assertion.exception.existing.get('[applied]'), False) q = TestIfExistsModel.objects(id=id) self.assertEqual(len(q), 1) @@ -198,17 +192,13 @@ def test_delete_if_exists(self): with self.assertRaises(LWTException) as assertion: m.if_exists().delete() - self.assertEqual(assertion.exception.existing, { - '[applied]': False, - }) + self.assertEqual(assertion.exception.existing.get('[applied]'), False) # queryset delete with self.assertRaises(LWTException) as assertion: TestIfExistsModel.objects(id=uuid4()).if_exists().delete() - self.assertEqual(assertion.exception.existing, { - '[applied]': False, - }) + self.assertEqual(assertion.exception.existing.get('[applied]'), False) 
@unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_batch_delete_if_exists_success(self): @@ -237,9 +227,7 @@ def test_batch_delete_if_exists_success(self): m = TestIfExistsModel(id=uuid4(), count=42) # Doesn't exist m.batch(b).if_exists().delete() - self.assertEqual(assertion.exception.existing, { - '[applied]': False, - }) + self.assertEqual(assertion.exception.existing.get('[applied]'), False) @unittest.skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0") def test_batch_delete_mixed(self): From e782f40c2e8f09a9d86414fe215d6e2e2eb3a8d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Mon, 6 Feb 2023 15:56:13 +0100 Subject: [PATCH 104/551] tests/integration: Mark tests requiring custom indexes One test requires custom indexes, which are not supported by Scylla. Mark the test as such,so it's not executed with Scylla. --- tests/integration/__init__.py | 1 + tests/integration/cqlengine/statements/test_base_statement.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 7d6c1750ef..eb770cb099 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -359,6 +359,7 @@ def _id_and_mark(f): lessthandse60 = unittest.skipUnless(DSE_VERSION and DSE_VERSION < Version('6.0'), "DSE version less than 6.0 required") requirescollectionindexes = unittest.skipUnless(SCYLLA_VERSION is None or Version(SCYLLA_VERSION.split(':')[1]) >= Version('5.2'), 'Test requires Scylla >= 5.2 or Cassandra') +requirescustomindexes = unittest.skipUnless(SCYLLA_VERSION is None, 'Currently, Scylla does not support SASI or any other CUSTOM INDEX class.') pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") notpy3 = unittest.skipIf(sys.version_info >= (3, 0), "Test not applicable for Python 3.x runtime") diff --git a/tests/integration/cqlengine/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py index 3b5be60520..0b48096f61 100644 --- a/tests/integration/cqlengine/statements/test_base_statement.py +++ b/tests/integration/cqlengine/statements/test_base_statement.py @@ -26,7 +26,7 @@ from tests.integration.cqlengine.base import BaseCassEngTestCase, TestQueryUpdateModel from tests.integration.cqlengine import DEFAULT_KEYSPACE -from tests.integration import greaterthanorequalcass3_10, TestCluster +from tests.integration import greaterthanorequalcass3_10, requirescustomindexes, TestCluster from cassandra.cqlengine.connection import execute @@ -102,6 +102,7 @@ def test_insert_statement_execute(self): self.assertEqual(TestQueryUpdateModel.objects.count(), 0) @greaterthanorequalcass3_10 + @requirescustomindexes def test_like_operator(self): """ Test to verify the like operator works appropriately From 75d4c980c1172ceeaa76752b35bdf9293a27b4a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Mon, 6 Feb 2023 16:09:46 +0100 Subject: [PATCH 105/551] cqlengine: compaction strategy class Scylla compatibility When creating a table with compaction strategy class like `org.apache.cassandra.db.compaction.LeveledCompactionStrategy` it will be siltently renamed by Scylla to `LeveledCompactionStrategy`. This caused some tests to fail, so this commit accomodates this behaviour. 
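To make the comparison rule concrete, here is a standalone sketch (the helper name is ours; the patch inlines this check in _update_options and in the test helper):

    # A fully qualified compaction class and the short name Scylla reports
    # refer to the same strategy, so treat both spellings as equal.
    def same_compaction_class(existing, requested):
        return existing == requested or existing == requested.split('.')[-1]

    assert same_compaction_class(
        'LeveledCompactionStrategy',
        'org.apache.cassandra.db.compaction.LeveledCompactionStrategy')
    assert not same_compaction_class(
        'LeveledCompactionStrategy', 'SizeTieredCompactionStrategy')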
--- cassandra/cqlengine/management.py | 14 +++++++++++--- .../management/test_compaction_settings.py | 12 ++++++++++-- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 536bde6349..5e49fb54e5 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -483,9 +483,17 @@ def _update_options(model, connection=None): else: try: for k, v in value.items(): - if existing_value[k] != v: - update_options[name] = value - break + # When creating table with compaction 'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy' in Scylla, + # it will be silently changed to 'class': 'LeveledCompactionStrategy' - same for at least SizeTieredCompactionStrategy, + # probably others too. We need to handle this case here. + if k == 'class' and name == 'compaction': + if existing_value[k] != v and existing_value[k] != v.split('.')[-1]: + update_options[name] = value + break + else: + if existing_value[k] != v: + update_options[name] = value + break except KeyError: update_options[name] = value diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index d5dea12744..152810636b 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -118,8 +118,16 @@ def _verify_options(self, table_meta, expected_options): for subname, subvalue in value.items(): attr = "'%s': '%s'" % (subname, subvalue) found_at = cql.find(attr, start) - self.assertTrue(found_at > start) - self.assertTrue(found_at < end) + # When creating table with compaction 'class': 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy' in Scylla, + # it will be silently changed to 'class': 'LeveledCompactionStrategy' - same for at least SizeTieredCompactionStrategy, + # probably others too. We need to handle this case here. + if found_at == -1 and name == 'compaction' and subname == 'class': + attr = "'%s': '%s'" % (subname, subvalue.split('.')[-1]) + found_at = cql.find(attr, start) + else: + + self.assertTrue(found_at > start) + self.assertTrue(found_at < end) def test_all_size_tiered_options(self): class AllSizeTieredOptionsModel(Model): From f8bf4d34761db082afc8e79fde535616196da733 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Mon, 6 Feb 2023 16:12:46 +0100 Subject: [PATCH 106/551] test_batch_query.py: Fix warnings tests Some tests check that specific amout of warnings is emited, but warnings module deduplicates warnings by default, causing those tests to fail. This commits disables this filtering to fix the tests. 
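The deduplication is easy to reproduce outside the test suite; this small sketch (ours, not from the patch) shows why simplefilter("always") is needed:

    import warnings

    def noisy():
        warnings.warn("same message, same location")

    # Default filters dedupe by message, category and source location, so a
    # second identical call would record nothing; "always" keeps every one.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        noisy()
        noisy()

    assert len(caught) == 2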
--- tests/integration/cqlengine/test_batch_query.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/cqlengine/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py index 7b78fa9979..d809266e36 100644 --- a/tests/integration/cqlengine/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -218,6 +218,7 @@ def my_callback(*args, **kwargs): call_history.append(args) with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") with BatchQuery() as batch: batch.add_callback(my_callback) batch.execute() @@ -243,6 +244,7 @@ def my_callback(*args, **kwargs): with patch('cassandra.cqlengine.query.BatchQuery.warn_multiple_exec', False): with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") with BatchQuery() as batch: batch.add_callback(my_callback) batch.execute() From 6a352c410f7894a6df91670b19ea3c62215411a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Mon, 6 Feb 2023 16:25:03 +0100 Subject: [PATCH 107/551] cluster.py: Hande LWT batches in SimpleStatement Batch statements can be executed in (at least) 2 ways. 1. Using BatchStatement 2. Using SimpleStatement with string like `BEGIN BATCH ...` The second way was not handled by `was_applied` property of ResultSet`. This caused some tests to fail. This commit fixes the problem. --- cassandra/cluster.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index d2acc7c9ee..6385387ed1 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -29,6 +29,7 @@ import logging from warnings import warn from random import random +import re import six from six.moves import filter, range, queue as Queue import socket @@ -5349,6 +5350,8 @@ def cancel_continuous_paging(self): except AttributeError: raise DriverException("Attempted to cancel paging with no active session. This is only for requests with ContinuousdPagingOptions.") + batch_regex = re.compile('^\s*BEGIN\s+[a-zA-Z]*\s*BATCH') + @property def was_applied(self): """ @@ -5363,7 +5366,8 @@ def was_applied(self): if self.response_future.row_factory not in (named_tuple_factory, dict_factory, tuple_factory): raise RuntimeError("Cannot determine LWT result with row factory %s" % (self.response_future.row_factory,)) - is_batch_statement = isinstance(self.response_future.query, BatchStatement) + is_batch_statement = isinstance(self.response_future.query, BatchStatement) \ + or (isinstance(self.response_future.query, SimpleStatement) and self.batch_regex.match(self.response_future.query.query_string)) if is_batch_statement and (not self.column_names or self.column_names[0] != "[applied]"): raise RuntimeError("No LWT were present in the BatchStatement") From df49fdcc5696beb191409f711153f9aa9a858f8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Mon, 6 Feb 2023 16:26:19 +0100 Subject: [PATCH 108/551] Enable tests/integration/cqlengine in CI All the test should now pass so we can enable them. 
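For reference, the batch-detection pattern that the was_applied fix above compiles can be exercised on its own:

    import re

    # Same pattern as ResultSet.batch_regex in the fix: a statement counts as
    # a batch when its text starts with BEGIN [LOGGED|UNLOGGED|COUNTER] BATCH,
    # not only when it is a BatchStatement object.
    batch_regex = re.compile(r'^\s*BEGIN\s+[a-zA-Z]*\s*BATCH')

    assert batch_regex.match("BEGIN BATCH INSERT INTO t (k) VALUES (1); APPLY BATCH;")
    assert batch_regex.match("  BEGIN UNLOGGED BATCH UPDATE t SET v = 1 WHERE k = 1; APPLY BATCH;")
    assert not batch_regex.match("UPDATE t SET v = 1 WHERE k = 1 IF v = 0")

Note the pattern is case sensitive, matching the uppercase form the tests use.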
--- .github/workflows/integration-tests-python2.yml | 2 +- .github/workflows/integration-tests.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/integration-tests-python2.yml b/.github/workflows/integration-tests-python2.yml index e06e5cb2cd..bdcc878d15 100644 --- a/.github/workflows/integration-tests-python2.yml +++ b/.github/workflows/integration-tests-python2.yml @@ -21,5 +21,5 @@ jobs: - name: Test with pytest run: | - ./ci/run_integration_test.sh tests/integration/standard/test_authentication.py tests/integration/standard/test_cluster.py tests/integration/standard/test_concurrent.py tests/integration/standard/test_connection.py tests/integration/standard/test_control_connection.py tests/integration/standard/test_custom_payload.py tests/integration/standard/test_custom_protocol_handler.py tests/integration/standard/test_cython_protocol_handlers.py tests/integration/standard/test_scylla_cloud.py tests/integration/standard/test_use_keyspace.py tests/integration/standard/test_ip_change.py + ./ci/run_integration_test.sh tests/integration/standard/test_authentication.py tests/integration/standard/test_cluster.py tests/integration/standard/test_concurrent.py tests/integration/standard/test_connection.py tests/integration/standard/test_control_connection.py tests/integration/standard/test_custom_payload.py tests/integration/standard/test_custom_protocol_handler.py tests/integration/standard/test_cython_protocol_handlers.py tests/integration/standard/test_scylla_cloud.py tests/integration/standard/test_use_keyspace.py tests/integration/standard/test_ip_change.py tests/integration/cqlengine/ # can't run this, cause only 2 cpus on github actions: tests/integration/standard/test_shard_aware.py diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 669fc582c9..ca6e8a1c14 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -21,5 +21,5 @@ jobs: - name: Test with pytest run: | - ./ci/run_integration_test.sh tests/integration/standard/test_authentication.py tests/integration/standard/test_cluster.py tests/integration/standard/test_concurrent.py tests/integration/standard/test_connection.py tests/integration/standard/test_control_connection.py tests/integration/standard/test_custom_payload.py tests/integration/standard/test_custom_protocol_handler.py tests/integration/standard/test_cython_protocol_handlers.py tests/integration/standard/test_scylla_cloud.py tests/integration/standard/test_use_keyspace.py tests/integration/standard/test_ip_change.py + ./ci/run_integration_test.sh tests/integration/standard/test_authentication.py tests/integration/standard/test_cluster.py tests/integration/standard/test_concurrent.py tests/integration/standard/test_connection.py tests/integration/standard/test_control_connection.py tests/integration/standard/test_custom_payload.py tests/integration/standard/test_custom_protocol_handler.py tests/integration/standard/test_cython_protocol_handlers.py tests/integration/standard/test_scylla_cloud.py tests/integration/standard/test_use_keyspace.py tests/integration/standard/test_ip_change.py tests/integration/cqlengine/ # can't run this, cause only 2 cpus on github actions: tests/integration/standard/test_shard_aware.py From d2515c45aa88d1efb9072a48853b260c07e9c07e Mon Sep 17 00:00:00 2001 From: Piotr Grabowski Date: Mon, 13 Feb 2023 17:36:01 +0100 Subject: [PATCH 109/551] docs: remove DataStax Astra documentation --- docs/cloud.rst | 91 
-------------------------------------------------- 1 file changed, 91 deletions(-) delete mode 100644 docs/cloud.rst diff --git a/docs/cloud.rst b/docs/cloud.rst deleted file mode 100644 index acabe62993..0000000000 --- a/docs/cloud.rst +++ /dev/null @@ -1,91 +0,0 @@ -:orphan: - -Cloud ------ -Connecting -========== -To connect to a DataStax Astra cluster: - -1. Download the secure connect bundle from your Astra account. -2. Connect to your cluster with - -.. code-block:: python - - from cassandra.cluster import Cluster - from cassandra.auth import PlainTextAuthProvider - - cloud_config = { - 'secure_connect_bundle': '/path/to/secure-connect-dbname.zip' - } - auth_provider = PlainTextAuthProvider(username='user', password='pass') - cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider) - session = cluster.connect() - -Cloud Config Options -==================== - -use_default_tempdir -+++++++++++++++++++ - -The secure connect bundle needs to be extracted to load the certificates into the SSLContext. -By default, the zip location is used as the base dir for the extraction. In some environments, -the zip location file system is read-only (e.g Azure Function). With *use_default_tempdir* set to *True*, -the default temporary directory of the system will be used as base dir. - -.. code:: python - - cloud_config = { - 'secure_connect_bundle': '/path/to/secure-connect-dbname.zip', - 'use_default_tempdir': True - } - ... - -Astra Differences -================== -In most circumstances, the client code for interacting with an Astra cluster will be the same as interacting with any other Cassandra cluster. The exceptions being: - -* A cloud configuration must be passed to a :class:`~.Cluster` instance via the `cloud` attribute (as demonstrated above). -* An SSL connection will be established automatically. Manual SSL configuration is not allowed, and using `ssl_context` or `ssl_options` will result in an exception. -* A :class:`~.Cluster`'s `contact_points` attribute should not be used. The cloud config contains all of the necessary contact information. -* If a consistency level is not specified for an execution profile or query, then :attr:`.ConsistencyLevel.LOCAL_QUORUM` will be used as the default. - - -Limitations -=========== - -Event loops -^^^^^^^^^^^ -Evenlet isn't yet supported for python 3.7+ due to an `issue in Eventlet `_. - - -CqlEngine -========= - -When using the object mapper, you can configure cqlengine with :func:`~.cqlengine.connection.set_session`: - -.. code:: python - - from cassandra.cqlengine import connection - ... - - c = Cluster(cloud={'secure_connect_bundle':'/path/to/secure-connect-test.zip'}, - auth_provider=PlainTextAuthProvider('user', 'pass')) - s = c.connect('myastrakeyspace') - connection.set_session(s) - ... - -If you are using some third-party libraries (flask, django, etc.), you might not be able to change the -configuration mechanism. For this reason, the `hosts` argument of the default -:func:`~.cqlengine.connection.setup` function will be ignored if a `cloud` config is provided: - -.. code:: python - - from cassandra.cqlengine import connection - ... 
- - connection.setup( - None, # or anything else - "myastrakeyspace", cloud={ - 'secure_connect_bundle':'/path/to/secure-connect-test.zip' - }, - auth_provider=PlainTextAuthProvider('user', 'pass')) From 2189bc5a54471a0c23ec6d8894a76ef0ade17eb8 Mon Sep 17 00:00:00 2001 From: Piotr Grabowski Date: Mon, 13 Feb 2023 17:36:25 +0100 Subject: [PATCH 110/551] docs: replace "Scylla Cloud" with "ScyllaDB Cloud" --- docs/index.rst | 2 +- docs/scylla_cloud.rst | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index db6d0880d0..f4c3797b38 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -51,7 +51,7 @@ Contents Some discussion on the driver's approach to working with timestamp, date, time types :doc:`scylla_cloud` - Connect to Scylla Cloud + Connect to ScyllaDB Cloud :doc:`CHANGELOG` Log of changes to the driver, organized by version. diff --git a/docs/scylla_cloud.rst b/docs/scylla_cloud.rst index 62aaf76433..b5eb6df798 100644 --- a/docs/scylla_cloud.rst +++ b/docs/scylla_cloud.rst @@ -1,5 +1,5 @@ -Scylla Cloud ------------- +ScyllaDB Cloud +-------------- -To connect to a `Scylla Cloud `_ cluster, go to the Cluster Connect page, Python example. +To connect to a `ScyllaDB Cloud `_ cluster, go to the Cluster Connect page, Python example. For best performance, make sure to use the Scylla Driver. From 473484abaec9943d922864701c36b9be808e98fb Mon Sep 17 00:00:00 2001 From: Piotr Grabowski Date: Mon, 13 Feb 2023 17:37:51 +0100 Subject: [PATCH 111/551] docs: add "Connect to ScyllaDB Cloud Serverless" Add a new documentation page about how to connect to ScyllaDB Cloud Serverless with Python Driver. --- docs/index.rst | 4 +++ docs/scylla_cloud_serverless.rst | 49 ++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) create mode 100644 docs/scylla_cloud_serverless.rst diff --git a/docs/index.rst b/docs/index.rst index f4c3797b38..f8c618f837 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -53,6 +53,9 @@ Contents :doc:`scylla_cloud` Connect to ScyllaDB Cloud +:doc:`scylla_cloud_serverless` + Connect to ScyllaDB Cloud Serverless + :doc:`CHANGELOG` Log of changes to the driver, organized by version. @@ -79,6 +82,7 @@ Contents object_mapper dates_and_times scylla_cloud + scylla_cloud_serverless faq Getting Help diff --git a/docs/scylla_cloud_serverless.rst b/docs/scylla_cloud_serverless.rst new file mode 100644 index 0000000000..4e0bafd1b8 --- /dev/null +++ b/docs/scylla_cloud_serverless.rst @@ -0,0 +1,49 @@ +ScyllaDB Cloud Serverless +------------------------- + +With ScyllaDB Cloud, you can deploy `serverless databases `_. +The Python driver allows you to connect to a serverless database by utilizing the connection bundle you can download via the **Connect>Python** tab in the Cloud application. +The connection bundle is a YAML file with connection and credential information for your cluster. + +Connecting to a ScyllaDB Cloud serverless database is very similar to a standard connection to a ScyllaDB database. + +Here’s a short program that connects to a ScyllaDB Cloud serverless database and prints metadata about the cluster: + +.. 
code-block:: python + + from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT + from cassandra.policies import DCAwareRoundRobinPolicy, TokenAwarePolicy + + PATH_TO_BUNDLE_YAML = '/file/downloaded/from/cloud/connect-bundle.yaml' + + + def get_cluster(): + profile = ExecutionProfile( + load_balancing_policy=TokenAwarePolicy( + DCAwareRoundRobinPolicy(local_dc='us-east-1') + ) + ) + + return Cluster( + execution_profiles={EXEC_PROFILE_DEFAULT: profile}, + scylla_cloud=PATH_TO_BUNDLE_YAML, + ) + + + print('Connecting to cluster') + cluster = get_cluster() + session = cluster.connect() + + print('Connected to cluster', cluster.metadata.cluster_name) + + print('Getting metadata') + for host in cluster.metadata.all_hosts(): + print('Datacenter: {}; Host: {}; Rack: {}'.format( + host.datacenter, host.address, host.rack) + ) + + cluster.shutdown() + +By providing the ``scylla_cloud`` parameter to the :class:`~.Cluster` constructor, +the driver can set up the connection based on the endpoint and credential information +stored in your downloaded ScyllaDB Cloud Serverless connection bundle. \ No newline at end of file From 57f7e3318ba68b8326600de6b17618f1189cca1b Mon Sep 17 00:00:00 2001 From: Piotr Grabowski Date: Wed, 15 Feb 2023 12:49:18 +0100 Subject: [PATCH 112/551] docs: fix building docs in CI Before this commit, building docs in CI failed with: AttributeError: module 'dns.rdtypes' has no attribute 'ANY' Pinning an older dnspython version fixes the problem. --- docs/pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/pyproject.toml b/docs/pyproject.toml index e9ffdd15d7..4cff92ee70 100644 --- a/docs/pyproject.toml +++ b/docs/pyproject.toml @@ -5,6 +5,7 @@ description = "ScyllaDB Python Driver Docs" authors = ["Python Driver Contributors"] [tool.poetry.dependencies] +dnspython = "2.2.1" eventlet = "0.25.2" futures = "2.2.0" geomet = "0.1.2" From 728c7126e1c7a09c6cd470cad2cba55fbbe49cb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Reis?= Date: Thu, 23 Feb 2023 16:43:39 +0000 Subject: [PATCH 113/551] Fix jenkins builds (#1134) * remove master node dependency * set git env vars "manually" * fix branch_name * disable concurrent builds * workaround dse versions --- Jenkinsfile | 85 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 53 insertions(+), 32 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 9536f52aa7..0fdafb17d2 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -35,12 +35,12 @@ slack = new Slack() // We also avoid cython since it's tested as part of the nightlies. 
matrices = [ "FULL": [ - "SERVER": ['2.1', '2.2', '3.0', '3.11', '4.0', 'dse-5.0', 'dse-5.1', 'dse-6.0', 'dse-6.7', 'dse-6.8'], + "SERVER": ['2.1', '2.2', '3.0', '3.11', '4.0', 'dse-5.0.15', 'dse-5.1.35', 'dse-6.0.18', 'dse-6.7.17', 'dse-6.8.30'], "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], "CYTHON": ["True", "False"] ], "DEVELOP": [ - "SERVER": ['2.1', '3.11', 'dse-6.8'], + "SERVER": ['2.1', '3.11', 'dse-6.8.30'], "RUNTIME": ['2.7.18', '3.6.10'], "CYTHON": ["True", "False"] ], @@ -50,20 +50,20 @@ matrices = [ "CYTHON": ["True", "False"] ], "DSE": [ - "SERVER": ['dse-5.0', 'dse-5.1', 'dse-6.0', 'dse-6.7', 'dse-6.8'], + "SERVER": ['dse-5.0.15', 'dse-5.1.35', 'dse-6.0.18', 'dse-6.7.17', 'dse-6.8.30'], "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], "CYTHON": ["True", "False"] ], "SMOKE": [ - "SERVER": ['3.11', '4.0', 'dse-6.8'], + "SERVER": ['3.11', '4.0', 'dse-6.8.30'], "RUNTIME": ['3.7.7', '3.8.3'], "CYTHON": ["False"] ] ] -def getBuildContext() { +def initializeSlackContext() { /* - Based on schedule, parameters and branch name, configure the build context and env vars. + Based on git branch/commit, configure the build context and env vars. */ def driver_display_name = 'Cassandra Python Driver' @@ -72,11 +72,17 @@ def getBuildContext() { } else if (env.GIT_URL.contains('python-dse-driver')) { driver_display_name = 'DSE Python Driver' } + env.DRIVER_DISPLAY_NAME = driver_display_name + env.GIT_SHA = "${env.GIT_COMMIT.take(7)}" + env.GITHUB_PROJECT_URL = "https://${GIT_URL.replaceFirst(/(git@|http:\/\/|https:\/\/)/, '').replace(':', '/').replace('.git', '')}" + env.GITHUB_BRANCH_URL = "${env.GITHUB_PROJECT_URL}/tree/${env.BRANCH_NAME}" + env.GITHUB_COMMIT_URL = "${env.GITHUB_PROJECT_URL}/commit/${env.GIT_COMMIT}" +} - def git_sha = "${env.GIT_COMMIT.take(7)}" - def github_project_url = "https://${GIT_URL.replaceFirst(/(git@|http:\/\/|https:\/\/)/, '').replace(':', '/').replace('.git', '')}" - def github_branch_url = "${github_project_url}/tree/${env.BRANCH_NAME}" - def github_commit_url = "${github_project_url}/commit/${env.GIT_COMMIT}" +def getBuildContext() { + /* + Based on schedule and parameters, configure the build context and env vars. + */ def profile = "${params.PROFILE}" def EVENT_LOOP = "${params.EVENT_LOOP.toLowerCase()}" @@ -116,9 +122,7 @@ def getBuildContext() { context = [ vars: [ "PROFILE=${profile}", - "EVENT_LOOP=${EVENT_LOOP}", - "DRIVER_DISPLAY_NAME=${driver_display_name}", "GIT_SHA=${git_sha}", "GITHUB_PROJECT_URL=${github_project_url}", - "GITHUB_BRANCH_URL=${github_branch_url}", "GITHUB_COMMIT_URL=${github_commit_url}" + "EVENT_LOOP=${EVENT_LOOP}" ], matrix: matrix ] @@ -152,7 +156,14 @@ def getMatrixBuilds(buildContext) { def cythonDesc = cythonFlag == "True" ? ", Cython": "" tasks["${serverVersion}, py${runtimeVersion}${cythonDesc}"] = { node("${OS_VERSION}") { - checkout scm + scm_variables = checkout scm + env.GIT_COMMIT = scm_variables.get('GIT_COMMIT') + env.GIT_URL = scm_variables.get('GIT_URL') + initializeSlackContext() + + if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { + slack.notifyChannel() + } withEnv(taskVars) { buildAndTest(context) @@ -203,6 +214,21 @@ def initializeEnvironment() { . 
${CCM_ENVIRONMENT_SHELL} ${CASSANDRA_VERSION} ''' + if (env.CASSANDRA_VERSION.split('-')[0] == 'dse') { + env.DSE_FIXED_VERSION = env.CASSANDRA_VERSION.split('-')[1] + sh label: 'Update environment for DataStax Enterprise', script: '''#!/bin/bash -le + cat >> ${HOME}/environment.txt << ENVIRONMENT_EOF +CCM_CASSANDRA_VERSION=${DSE_FIXED_VERSION} # maintain for backwards compatibility +CCM_VERSION=${DSE_FIXED_VERSION} +CCM_SERVER_TYPE=dse +DSE_VERSION=${DSE_FIXED_VERSION} +CCM_IS_DSE=true +CCM_BRANCH=${DSE_FIXED_VERSION} +DSE_BRANCH=${DSE_FIXED_VERSION} +ENVIRONMENT_EOF + ''' + } + sh label: 'Display Python and environment information', script: '''#!/bin/bash -le # Load CCM environment variables set -o allexport @@ -384,6 +410,7 @@ pipeline { // Global pipeline timeout options { + disableConcurrentBuilds() timeout(time: 10, unit: 'HOURS') // TODO timeout should be per build buildDiscarder(logRotator(artifactNumToKeepStr: '10', // Keep only the last 10 artifacts numToKeepStr: '50')) // Keep only the last 50 build records @@ -486,11 +513,11 @@ pipeline { '3.0', // Previous Apache CassandraⓇ '3.11', // Current Apache CassandraⓇ '4.0', // Development Apache CassandraⓇ - 'dse-5.0', // Long Term Support DataStax Enterprise - 'dse-5.1', // Legacy DataStax Enterprise - 'dse-6.0', // Previous DataStax Enterprise - 'dse-6.7', // Previous DataStax Enterprise - 'dse-6.8', // Current DataStax Enterprise + 'dse-5.0.15', // Long Term Support DataStax Enterprise + 'dse-5.1.35', // Legacy DataStax Enterprise + 'dse-6.0.18', // Previous DataStax Enterprise + 'dse-6.7.17', // Previous DataStax Enterprise + 'dse-6.8.30', // Current DataStax Enterprise ], description: '''Apache CassandraⓇ and DataStax Enterprise server version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY!
                    DEFAULT: Default to the build context.
                    SMOKE: Basic smoke tests for current Python runtimes + C*/DSE versions, no Cython
                    FULL: All server versions, python runtimes tested with and without Cython.
@@ -525,23 +552,23 @@
             Apache CassandraⓇ v4.x (CURRENTLY UNDER DEVELOPMENT)
-            dse-5.0
+            dse-5.0.15
             DataStax Enterprise v5.0.x (Long Term Support)
-            dse-5.1
+            dse-5.1.35
             DataStax Enterprise v5.1.x
-            dse-6.0
+            dse-6.0.18
             DataStax Enterprise v6.0.x
-            dse-6.7
+            dse-6.7.17
             DataStax Enterprise v6.7.x
-            dse-6.8
+            dse-6.8.30
             DataStax Enterprise v6.8.x (CURRENTLY UNDER DEVELOPMENT)
''') @@ -623,7 +650,7 @@ pipeline { parameterizedCron((scheduleTriggerJobName() == env.JOB_NAME) ? """ # Every weeknight (Monday - Friday) around 4:00 AM # These schedules will run with and without Cython enabled for Python v2.7.18 and v3.5.9 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18 3.5.9;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 + H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18 3.5.9;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1.35 dse-6.0.18 dse-6.7.17 """ : "") } @@ -635,11 +662,6 @@ pipeline { stages { stage ('Build and Test') { - agent { - // // If I removed this agent block, GIT_URL and GIT_COMMIT aren't set. - // // However, this trigger an additional checkout - label "master" - } when { beforeAgent true allOf { @@ -651,8 +673,7 @@ pipeline { script { context = getBuildContext() withEnv(context.vars) { - describeBuild(context) - slack.notifyChannel() + describeBuild(context) // build and test all builds parallel getMatrixBuilds(context) From 02aa886946d3e308f0e646cba8b61bed7a85ea11 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Tue, 28 Feb 2023 14:22:05 -0600 Subject: [PATCH 114/551] Merge pull request #1128 from python-driver/python-1304 Contains fixes for PYTHON-1304 and PYTHON-1287, both of which describe test failures caused by recent changes to driver code --- cassandra/pool.py | 8 ++++++-- tests/integration/standard/test_cluster.py | 15 +++++++++++++-- tests/integration/util.py | 12 +++++++++--- 3 files changed, 28 insertions(+), 7 deletions(-) diff --git a/cassandra/pool.py b/cassandra/pool.py index c82dfe9a6b..d61e81cd0d 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -568,7 +568,9 @@ def get_state(self): connection = self._connection open_count = 1 if connection and not (connection.is_closed or connection.is_defunct) else 0 in_flights = [connection.in_flight] if connection else [] - return {'shutdown': self.is_shutdown, 'open_count': open_count, 'in_flights': in_flights} + orphan_requests = [connection.orphaned_request_ids] if connection else [] + return {'shutdown': self.is_shutdown, 'open_count': open_count, \ + 'in_flights': in_flights, 'orphan_requests': orphan_requests} @property def open_count(self): @@ -926,4 +928,6 @@ def get_connections(self): def get_state(self): in_flights = [c.in_flight for c in self._connections] - return {'shutdown': self.is_shutdown, 'open_count': self.open_count, 'in_flights': in_flights} + orphan_requests = [c.orphaned_request_ids for c in self._connections] + return {'shutdown': self.is_shutdown, 'open_count': self.open_count, \ + 'in_flights': in_flights, 'orphan_requests': orphan_requests} diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index a15c7f32e2..deceed58fd 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -16,7 +16,7 @@ from collections import deque from copy import copy -from mock import Mock, call, patch +from mock import Mock, call, patch, ANY import time from uuid import uuid4 import logging @@ -1478,7 +1478,18 @@ def test_prepare_on_ignored_hosts(self): # the length of mock_calls will vary, but all should use the unignored # address for c in cluster.connection_factory.mock_calls: - self.assertEqual(call(DefaultEndPoint(unignored_address)), c) + # PYTHON-1287 + # + # Cluster._prepare_all_queries() will call connection_factory _without_ the + # on_orphaned_stream_released arg introduced in commit + # 
387150acc365b6cf1daaee58c62db13e4929099a. The reconnect handler for the + # downed node _will_ add this arg when it tries to rebuild it's conn pool, and + # whether this occurs while running this test amounts to a race condition. So + # to cover this case we assert one of two call styles here... the key is that + # the _only_ address we should see is the unignored_address. + self.assertTrue( \ + c == call(DefaultEndPoint(unignored_address)) or \ + c == call(DefaultEndPoint(unignored_address), on_orphaned_stream_released=ANY)) cluster.shutdown() diff --git a/tests/integration/util.py b/tests/integration/util.py index 6215449d1f..bcc4cb829b 100644 --- a/tests/integration/util.py +++ b/tests/integration/util.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from itertools import chain + from tests.integration import PROTOCOL_VERSION import time @@ -38,14 +40,18 @@ def assert_quiescent_pool_state(test_case, cluster, wait=None): for state in pool_states: test_case.assertFalse(state['shutdown']) test_case.assertGreater(state['open_count'], 0) - test_case.assertTrue(all((i == 0 for i in state['in_flights']))) + no_in_flight = all((i == 0 for i in state['in_flights'])) + orphans_and_inflights = zip(state['orphan_requests'],state['in_flights']) + all_orphaned = all((len(orphans) == inflight for (orphans,inflight) in orphans_and_inflights)) + test_case.assertTrue(no_in_flight or all_orphaned) for holder in cluster.get_connection_holders(): for connection in holder.get_connections(): # all ids are unique req_ids = connection.request_ids + orphan_ids = connection.orphaned_request_ids test_case.assertEqual(len(req_ids), len(set(req_ids))) - test_case.assertEqual(connection.highest_request_id, len(req_ids) - 1) - test_case.assertEqual(connection.highest_request_id, max(req_ids)) + test_case.assertEqual(connection.highest_request_id, len(req_ids) + len(orphan_ids) - 1) + test_case.assertEqual(connection.highest_request_id, max(chain(req_ids, orphan_ids))) if PROTOCOL_VERSION < 3: test_case.assertEqual(connection.highest_request_id, connection.max_request_id) From b0030194e3e3c5c2a932315b2282dab75ec23f67 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Wed, 8 Mar 2023 09:55:53 -0600 Subject: [PATCH 115/551] Merge pull request #1137 from python-driver/python-1329 PYTHON-1329 Change expected port numbers if use_single_interface is used --- tests/integration/__init__.py | 19 +++++++++++++++---- .../standard/test_single_interface.py | 4 ++-- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index d3c3332649..a344931a4e 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -52,6 +52,14 @@ SINGLE_NODE_CLUSTER_NAME = 'single_node' MULTIDC_CLUSTER_NAME = 'multidc_test_cluster' +# When use_single_interface is specified ccm will assign distinct port numbers to each +# node in the cluster. This value specifies the default port value used for the first +# node that comes up. +# +# TODO: In the future we may want to make this configurable, but this should only apply +# if a non-standard port were specified when starting up the cluster. 
+DEFAULT_SINGLE_INTERFACE_PORT=9046 + CCM_CLUSTER = None path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'ccm') @@ -593,7 +601,10 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, wait_for_node_socket(node, 300) log.debug("Binary ports are open") if set_keyspace: - setup_keyspace(ipformat=ipformat) + args = {"ipformat": ipformat} + if use_single_interface: + args["port"] = DEFAULT_SINGLE_INTERFACE_PORT + setup_keyspace(**args) except Exception: log.exception("Failed to start CCM cluster; removing cluster.") @@ -692,7 +703,7 @@ def drop_keyspace_shutdown_cluster(keyspace_name, session, cluster): cluster.shutdown() -def setup_keyspace(ipformat=None, wait=True, protocol_version=None): +def setup_keyspace(ipformat=None, wait=True, protocol_version=None, port=9042): # wait for nodes to startup if wait: time.sleep(10) @@ -703,9 +714,9 @@ def setup_keyspace(ipformat=None, wait=True, protocol_version=None): _protocol_version = PROTOCOL_VERSION if not ipformat: - cluster = TestCluster(protocol_version=_protocol_version) + cluster = TestCluster(protocol_version=_protocol_version, port=port) else: - cluster = TestCluster(contact_points=["::1"], protocol_version=_protocol_version) + cluster = TestCluster(contact_points=["::1"], protocol_version=_protocol_version, port=port) session = cluster.connect() try: diff --git a/tests/integration/standard/test_single_interface.py b/tests/integration/standard/test_single_interface.py index 4677eff641..ffd2bbe9c4 100644 --- a/tests/integration/standard/test_single_interface.py +++ b/tests/integration/standard/test_single_interface.py @@ -22,7 +22,7 @@ from packaging.version import Version from tests.integration import use_singledc, PROTOCOL_VERSION, \ remove_cluster, greaterthanorequalcass40, notdse, \ - CASSANDRA_VERSION, DSE_VERSION, TestCluster + CASSANDRA_VERSION, DSE_VERSION, TestCluster, DEFAULT_SINGLE_INTERFACE_PORT def setup_module(): @@ -39,7 +39,7 @@ def teardown_module(): class SingleInterfaceTest(unittest.TestCase): def setUp(self): - self.cluster = TestCluster() + self.cluster = TestCluster(port=DEFAULT_SINGLE_INTERFACE_PORT) self.session = self.cluster.connect() def tearDown(self): From ff704d6a225c10de270c47bf68d1c1559ba7839e Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Thu, 9 Mar 2023 15:46:56 -0600 Subject: [PATCH 116/551] Merge pull request #1139 from python-driver/python-1328 PYTHON-1328: Add explicit wait to give cluster time to get initialized --- tests/integration/standard/test_authentication.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 189da45c94..b055bc75ec 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -12,14 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from packaging.version import Version import logging import time from cassandra.cluster import NoHostAvailable from cassandra.auth import PlainTextAuthProvider, SASLClient, SaslAuthProvider -from tests.integration import use_singledc, get_cluster, remove_cluster, PROTOCOL_VERSION, CASSANDRA_IP, \ - USE_CASS_EXTERNAL, start_cluster_wait_for_up, TestCluster +from tests.integration import use_singledc, get_cluster, remove_cluster, PROTOCOL_VERSION, \ + CASSANDRA_IP, CASSANDRA_VERSION, USE_CASS_EXTERNAL, start_cluster_wait_for_up, TestCluster from tests.integration.util import assert_quiescent_pool_state import unittest @@ -42,12 +43,19 @@ def setup_module(): log.debug("Starting ccm test cluster with %s", config_options) start_cluster_wait_for_up(ccm_cluster) + # PYTHON-1328 + # + # Give the cluster enough time to startup (and perform necessary initialization) + # before executing the test. + if CASSANDRA_VERSION > Version('4.0-a'): + time.sleep(10) def teardown_module(): remove_cluster() # this test messes with config class AuthenticationTests(unittest.TestCase): + """ Tests to cover basic authentication functionality """ @@ -86,6 +94,7 @@ def cluster_as(self, usr, pwd): raise Exception('Unable to connect with creds: {}/{}'.format(usr, pwd)) def test_auth_connect(self): + user = 'u' passwd = 'password' From ffe0097505b96b61c5fd2ec626aa934caf397873 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Thu, 9 Mar 2023 15:48:35 -0600 Subject: [PATCH 117/551] Merge pull request #1140 from python-driver/python-1327 PYTHON-1327: Add annotation to note server-side fix for certain C* versions --- tests/integration/standard/test_prepared_statements.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_prepared_statements.py b/tests/integration/standard/test_prepared_statements.py index 1ed48d2964..a643b19c07 100644 --- a/tests/integration/standard/test_prepared_statements.py +++ b/tests/integration/standard/test_prepared_statements.py @@ -13,9 +13,12 @@ # limitations under the License. -from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster +from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster, CASSANDRA_VERSION import unittest + +from packaging.version import Version + from cassandra import InvalidRequest, DriverException from cassandra import ConsistencyLevel, ProtocolVersion @@ -392,6 +395,9 @@ def test_raise_error_on_prepared_statement_execution_dropped_table(self): with self.assertRaises(InvalidRequest): self.session.execute(prepared, [0]) + @unittest.skipIf((CASSANDRA_VERSION >= Version('3.11.12') and CASSANDRA_VERSION < Version('4.0')) or \ + CASSANDRA_VERSION >= Version('4.0.2'), + "Fixed server-side in Cassandra 3.11.12, 4.0.2") def test_fail_if_different_query_id_on_reprepare(self): """ PYTHON-1124 and CASSANDRA-15252 """ keyspace = "test_fail_if_different_query_id_on_reprepare" From 4da7001b38e65e8d578d0b71b37ef7be3a618c2e Mon Sep 17 00:00:00 2001 From: Karthikeyan Singaravelan Date: Fri, 10 Mar 2023 03:39:01 +0530 Subject: [PATCH 118/551] Refactor deprecated unittest aliases for Python 3.11 compatibility. 
(#1112) --- .../cqlengine/management/test_compaction_settings.py | 6 +++--- .../integration/cqlengine/management/test_management.py | 3 ++- .../cqlengine/model/test_class_construction.py | 3 ++- tests/integration/cqlengine/test_batch_query.py | 3 ++- tests/integration/long/test_ipv6.py | 5 +++-- tests/integration/simulacron/test_connection.py | 3 ++- tests/integration/standard/test_authentication.py | 9 +++++---- tests/integration/standard/test_client_warnings.py | 9 +++++---- tests/integration/standard/test_cluster.py | 7 ++++--- tests/integration/standard/test_metadata.py | 6 +++--- tests/integration/standard/test_single_interface.py | 2 +- tests/integration/standard/test_types.py | 6 +++--- tests/unit/advanced/test_graph.py | 2 +- tests/unit/cqlengine/test_connection.py | 6 ++++-- tests/unit/test_connection.py | 2 +- tests/unit/test_control_connection.py | 4 ++-- tests/unit/test_policies.py | 2 +- tests/unit/test_protocol.py | 3 ++- tests/unit/test_response_future.py | 4 +++- tests/unit/test_timestamps.py | 3 ++- 20 files changed, 51 insertions(+), 37 deletions(-) diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index d5dea12744..673bda29a7 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -83,7 +83,7 @@ def test_alter_actually_alters(self): table_meta = _get_table_metadata(tmp) - self.assertRegexpMatches(table_meta.export_as_string(), '.*SizeTieredCompactionStrategy.*') + six.assertRegex(self, table_meta.export_as_string(), '.*SizeTieredCompactionStrategy.*') def test_alter_options(self): @@ -97,11 +97,11 @@ class AlterTable(Model): drop_table(AlterTable) sync_table(AlterTable) table_meta = _get_table_metadata(AlterTable) - self.assertRegexpMatches(table_meta.export_as_string(), ".*'sstable_size_in_mb': '64'.*") + six.assertRegex(self, table_meta.export_as_string(), ".*'sstable_size_in_mb': '64'.*") AlterTable.__options__['compaction']['sstable_size_in_mb'] = '128' sync_table(AlterTable) table_meta = _get_table_metadata(AlterTable) - self.assertRegexpMatches(table_meta.export_as_string(), ".*'sstable_size_in_mb': '128'.*") + six.assertRegex(self, table_meta.export_as_string(), ".*'sstable_size_in_mb': '128'.*") class OptionsTest(BaseCassEngTestCase): diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index f37db5e51f..67f87b10e4 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -13,6 +13,7 @@ # limitations under the License. import unittest +import six import mock import logging from packaging.version import Version @@ -261,7 +262,7 @@ def test_bogus_option_update(self): option = 'no way will this ever be an option' try: ModelWithTableProperties.__options__[option] = 'what was I thinking?' 
- self.assertRaisesRegexp(KeyError, "Invalid table option.*%s.*" % option, sync_table, ModelWithTableProperties) + six.assertRaisesRegex(self, KeyError, "Invalid table option.*%s.*" % option, sync_table, ModelWithTableProperties) finally: ModelWithTableProperties.__options__.pop(option, None) diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index 9c5afecbfc..95ba1f49bd 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -15,6 +15,7 @@ from uuid import uuid4 import warnings +import six from cassandra.cqlengine import columns, CQLEngineException from cassandra.cqlengine.models import Model, ModelException, ModelDefinitionException, ColumnQueryEvaluator from cassandra.cqlengine.query import ModelQuerySet, DMLQuery @@ -91,7 +92,7 @@ def test_attempting_to_make_duplicate_column_names_fails(self): Tests that trying to create conflicting db column names will fail """ - with self.assertRaisesRegexp(ModelException, r".*more than once$"): + with six.assertRaisesRegex(self, ModelException, r".*more than once$"): class BadNames(Model): words = columns.Text(primary_key=True) content = columns.Text(db_field='words') diff --git a/tests/integration/cqlengine/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py index 7b78fa9979..07ee2e13bf 100644 --- a/tests/integration/cqlengine/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -13,6 +13,7 @@ # limitations under the License. import warnings +import six import sure from cassandra.cqlengine import columns @@ -223,7 +224,7 @@ def my_callback(*args, **kwargs): batch.execute() batch.execute() self.assertEqual(len(w), 2) # package filter setup to warn always - self.assertRegexpMatches(str(w[0].message), r"^Batch.*multiple.*") + six.assertRegex(self, str(w[0].message), r"^Batch.*multiple.*") def test_disable_multiple_callback_warning(self): """ diff --git a/tests/integration/long/test_ipv6.py b/tests/integration/long/test_ipv6.py index b63fdebcf3..6c7d447dfb 100644 --- a/tests/integration/long/test_ipv6.py +++ b/tests/integration/long/test_ipv6.py @@ -13,6 +13,7 @@ # limitations under the License. 
import os, socket, errno +import six from ccmlib import common from cassandra.cluster import NoHostAvailable @@ -82,7 +83,7 @@ def test_connect(self): def test_error(self): cluster = TestCluster(connection_class=self.connection_class, contact_points=['::1'], port=9043, connect_timeout=10) - self.assertRaisesRegexp(NoHostAvailable, '\(\'Unable to connect.*%s.*::1\', 9043.*Connection refused.*' + six.assertRaisesRegex(self, NoHostAvailable, '\(\'Unable to connect.*%s.*::1\', 9043.*Connection refused.*' % errno.ECONNREFUSED, cluster.connect) def test_error_multiple(self): @@ -90,7 +91,7 @@ def test_error_multiple(self): raise unittest.SkipTest('localhost only resolves one address') cluster = TestCluster(connection_class=self.connection_class, contact_points=['localhost'], port=9043, connect_timeout=10) - self.assertRaisesRegexp(NoHostAvailable, '\(\'Unable to connect.*Tried connecting to \[\(.*\(.*\].*Last error', + six.assertRaisesRegex(self, NoHostAvailable, '\(\'Unable to connect.*Tried connecting to \[\(.*\(.*\].*Last error', cluster.connect) diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 0c70d0a1e9..e34e69f458 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -14,6 +14,7 @@ import unittest import logging +import six import time from mock import Mock, patch @@ -262,7 +263,7 @@ def connection_factory(self, *args, **kwargs): prime_request(PrimeOptions(then={"result": "no_result", "delay_in_ms": never})) prime_request(RejectConnections("unbind")) - self.assertRaisesRegexp(OperationTimedOut, "Connection defunct by heartbeat", future.result) + six.assertRaisesRegex(self, OperationTimedOut, "Connection defunct by heartbeat", future.result) def test_close_when_query(self): """ diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index b055bc75ec..c23c9eedf2 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -16,6 +16,7 @@ import logging import time +import six from cassandra.cluster import NoHostAvailable from cassandra.auth import PlainTextAuthProvider, SASLClient, SaslAuthProvider @@ -121,7 +122,7 @@ def test_auth_connect(self): def test_connect_wrong_pwd(self): cluster = self.cluster_as('cassandra', 'wrong_pass') try: - self.assertRaisesRegexp(NoHostAvailable, + six.assertRaisesRegex(self, NoHostAvailable, '.*AuthenticationFailed.', cluster.connect) assert_quiescent_pool_state(self, cluster) @@ -131,7 +132,7 @@ def test_connect_wrong_pwd(self): def test_connect_wrong_username(self): cluster = self.cluster_as('wrong_user', 'cassandra') try: - self.assertRaisesRegexp(NoHostAvailable, + six.assertRaisesRegex(self, NoHostAvailable, '.*AuthenticationFailed.*', cluster.connect) assert_quiescent_pool_state(self, cluster) @@ -141,7 +142,7 @@ def test_connect_wrong_username(self): def test_connect_empty_pwd(self): cluster = self.cluster_as('Cassandra', '') try: - self.assertRaisesRegexp(NoHostAvailable, + six.assertRaisesRegex(self, NoHostAvailable, '.*AuthenticationFailed.*', cluster.connect) assert_quiescent_pool_state(self, cluster) @@ -151,7 +152,7 @@ def test_connect_empty_pwd(self): def test_connect_no_auth_provider(self): cluster = TestCluster() try: - self.assertRaisesRegexp(NoHostAvailable, + six.assertRaisesRegex(self, NoHostAvailable, '.*AuthenticationFailed.*', cluster.connect) assert_quiescent_pool_state(self, cluster) 
diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index 5f63b5265a..166f172a16 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -15,6 +15,7 @@ import unittest +import six from cassandra.query import BatchStatement from tests.integration import use_singledc, PROTOCOL_VERSION, local, TestCluster @@ -70,7 +71,7 @@ def test_warning_basic(self): future = self.session.execute_async(self.warn_batch) future.result() self.assertEqual(len(future.warnings), 1) - self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*') + six.assertRegex(self, future.warnings[0], 'Batch.*exceeding.*') def test_warning_with_trace(self): """ @@ -86,7 +87,7 @@ def test_warning_with_trace(self): future = self.session.execute_async(self.warn_batch, trace=True) future.result() self.assertEqual(len(future.warnings), 1) - self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*') + six.assertRegex(self, future.warnings[0], 'Batch.*exceeding.*') self.assertIsNotNone(future.get_query_trace()) @local @@ -105,7 +106,7 @@ def test_warning_with_custom_payload(self): future = self.session.execute_async(self.warn_batch, custom_payload=payload) future.result() self.assertEqual(len(future.warnings), 1) - self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*') + six.assertRegex(self, future.warnings[0], 'Batch.*exceeding.*') self.assertDictEqual(future.custom_payload, payload) @local @@ -124,6 +125,6 @@ def test_warning_with_trace_and_custom_payload(self): future = self.session.execute_async(self.warn_batch, trace=True, custom_payload=payload) future.result() self.assertEqual(len(future.warnings), 1) - self.assertRegexpMatches(future.warnings[0], 'Batch.*exceeding.*') + six.assertRegex(self, future.warnings[0], 'Batch.*exceeding.*') self.assertIsNotNone(future.get_query_trace()) self.assertDictEqual(future.custom_payload, payload) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index deceed58fd..c5f64f6c28 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -23,6 +23,7 @@ import warnings from packaging.version import Version +import six import cassandra from cassandra.cluster import NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT, ControlConnection, Cluster from cassandra.concurrent import execute_concurrent @@ -147,7 +148,7 @@ def test_raise_error_on_control_connection_timeout(self): get_node(1).pause() cluster = TestCluster(contact_points=['127.0.0.1'], connect_timeout=1) - with self.assertRaisesRegexp(NoHostAvailable, "OperationTimedOut\('errors=Timed out creating connection \(1 seconds\)"): + with six.assertRaisesRegex(self, NoHostAvailable, "OperationTimedOut\('errors=Timed out creating connection \(1 seconds\)"): cluster.connect() cluster.shutdown() @@ -535,7 +536,7 @@ def patched_wait_for_responses(*args, **kwargs): # cluster agreement wait used for refresh original_meta = c.metadata.keyspaces start_time = time.time() - self.assertRaisesRegexp(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata) + six.assertRaisesRegex(self, Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata) end_time = time.time() self.assertGreaterEqual(end_time - start_time, agreement_timeout) self.assertIs(original_meta, c.metadata.keyspaces) @@ -572,7 +573,7 @@ def patched_wait_for_responses(*args, **kwargs): # refresh wait 
overrides cluster value original_meta = c.metadata.keyspaces start_time = time.time() - self.assertRaisesRegexp(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata, + six.assertRaisesRegex(self, Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata, max_schema_agreement_wait=agreement_timeout) end_time = time.time() self.assertGreaterEqual(end_time - start_time, agreement_timeout) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index e20f1f0640..6f76c2a9b0 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1590,7 +1590,7 @@ def test_function_no_parameters(self): with self.VerifiedFunction(self, **kwargs) as vf: fn_meta = self.keyspace_function_meta[vf.signature] - self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*%s\(\) .*" % kwargs['name']) + six.assertRegex(self, fn_meta.as_cql_query(), "CREATE FUNCTION.*%s\(\) .*" % kwargs['name']) def test_functions_follow_keyspace_alter(self): """ @@ -1638,12 +1638,12 @@ def test_function_cql_called_on_null(self): kwargs['called_on_null_input'] = True with self.VerifiedFunction(self, **kwargs) as vf: fn_meta = self.keyspace_function_meta[vf.signature] - self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) CALLED ON NULL INPUT RETURNS .*") + six.assertRegex(self, fn_meta.as_cql_query(), "CREATE FUNCTION.*\) CALLED ON NULL INPUT RETURNS .*") kwargs['called_on_null_input'] = False with self.VerifiedFunction(self, **kwargs) as vf: fn_meta = self.keyspace_function_meta[vf.signature] - self.assertRegexpMatches(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) RETURNS NULL ON NULL INPUT RETURNS .*") + six.assertRegex(self, fn_meta.as_cql_query(), "CREATE FUNCTION.*\) RETURNS NULL ON NULL INPUT RETURNS .*") class AggregateMetadata(FunctionTest): diff --git a/tests/integration/standard/test_single_interface.py b/tests/integration/standard/test_single_interface.py index ffd2bbe9c4..8d407be958 100644 --- a/tests/integration/standard/test_single_interface.py +++ b/tests/integration/standard/test_single_interface.py @@ -71,4 +71,4 @@ def test_single_interface(self): consistency_level=ConsistencyLevel.ALL)) for pool in self.session.get_pools(): - self.assertEquals(1, pool.get_state()['open_count']) + self.assertEqual(1, pool.get_state()['open_count']) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index f69e88c64f..828f10b5e2 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -69,7 +69,7 @@ def test_can_insert_blob_type_as_string(self): msg = r'.*Invalid STRING constant \(.*?\) for "b" of type blob.*' else: msg = r'.*Invalid STRING constant \(.*?\) for b of type blob.*' - self.assertRaisesRegexp(InvalidRequest, msg, s.execute, query, params) + six.assertRaisesRegex(self, InvalidRequest, msg, s.execute, query, params) return # In python2, with Cassandra < 2.0, we can manually encode the 'byte str' type as hex for insertion in a blob. 
@@ -1060,7 +1060,7 @@ def _daterange_round_trip(self, to_insert, expected=None): results = self.session.execute(prep_sel) dr = results[0].dr - # sometimes this is truncated in the assertEquals output on failure; + # sometimes this is truncated in the assertEqual output on failure; if isinstance(expected, six.string_types): self.assertEqual(str(dr), expected) else: @@ -1114,7 +1114,7 @@ def _daterange_round_trip(self, to_insert, expected=None): results= self.session.execute("SELECT * FROM tab WHERE dr = '{0}' ".format(to_insert)) dr = results[0].dr - # sometimes this is truncated in the assertEquals output on failure; + # sometimes this is truncated in the assertEqual output on failure; if isinstance(expected, six.string_types): self.assertEqual(str(dr), expected) else: diff --git a/tests/unit/advanced/test_graph.py b/tests/unit/advanced/test_graph.py index 25dd289dba..77a920a3bf 100644 --- a/tests/unit/advanced/test_graph.py +++ b/tests/unit/advanced/test_graph.py @@ -259,7 +259,7 @@ def test_init_unknown_kwargs(self): with warnings.catch_warnings(record=True) as w: GraphOptions(unknown_param=42) self.assertEqual(len(w), 1) - self.assertRegexpMatches(str(w[0].message), r"^Unknown keyword.*GraphOptions.*") + six.assertRegex(self, str(w[0].message), r"^Unknown keyword.*GraphOptions.*") def test_update(self): opts = GraphOptions(**self.api_params) diff --git a/tests/unit/cqlengine/test_connection.py b/tests/unit/cqlengine/test_connection.py index 8e3a0b75bd..9c3454796a 100644 --- a/tests/unit/cqlengine/test_connection.py +++ b/tests/unit/cqlengine/test_connection.py @@ -14,6 +14,8 @@ import unittest +import six + from cassandra.cluster import _ConfigMode from cassandra.cqlengine import connection from cassandra.query import dict_factory @@ -50,12 +52,12 @@ def test_get_session_fails_without_existing_connection(self): """ Users can't get the default session without having a default connection set. """ - with self.assertRaisesRegexp(connection.CQLEngineException, self.no_registered_connection_msg): + with six.assertRaisesRegex(self, connection.CQLEngineException, self.no_registered_connection_msg): connection.get_session(connection=None) def test_get_cluster_fails_without_existing_connection(self): """ Users can't get the default cluster without having a default connection set. 
""" - with self.assertRaisesRegexp(connection.CQLEngineException, self.no_registered_connection_msg): + with six.assertRaisesRegex(self, connection.CQLEngineException, self.no_registered_connection_msg): connection.get_cluster(connection=None) diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index f06b67ebe0..97faa5e7fc 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -392,7 +392,7 @@ def send_msg(msg, req_id, msg_callback): connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count) exc = connection.defunct.call_args_list[0][0][0] self.assertIsInstance(exc, ConnectionException) - self.assertRegexpMatches(exc.args[0], r'^Received unexpected response to OptionsMessage.*') + six.assertRegex(self, exc.args[0], r'^Received unexpected response to OptionsMessage.*') holder.return_connection.assert_has_calls( [call(connection)] * get_holders.call_count) diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index 276b2849ca..53a5d6affc 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -526,7 +526,7 @@ def test_refresh_nodes_and_tokens_add_host_detects_port(self): self.assertEqual(self.cluster.added_hosts[0].broadcast_rpc_address, "192.168.1.3") self.assertEqual(self.cluster.added_hosts[0].broadcast_rpc_port, 555) self.assertEqual(self.cluster.added_hosts[0].broadcast_address, "10.0.0.3") - self.assertEquals(self.cluster.added_hosts[0].broadcast_port, 666) + self.assertEqual(self.cluster.added_hosts[0].broadcast_port, 666) self.assertEqual(self.cluster.added_hosts[0].datacenter, "dc1") self.assertEqual(self.cluster.added_hosts[0].rack, "rack1") @@ -546,7 +546,7 @@ def test_refresh_nodes_and_tokens_add_host_detects_invalid_port(self): self.assertEqual(self.cluster.added_hosts[0].broadcast_rpc_address, "192.168.1.3") self.assertEqual(self.cluster.added_hosts[0].broadcast_rpc_port, None) self.assertEqual(self.cluster.added_hosts[0].broadcast_address, "10.0.0.3") - self.assertEquals(self.cluster.added_hosts[0].broadcast_port, None) + self.assertEqual(self.cluster.added_hosts[0].broadcast_port, None) self.assertEqual(self.cluster.added_hosts[0].datacenter, "dc1") self.assertEqual(self.cluster.added_hosts[0].rack, "rack1") diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 88db23daba..edafb7cb01 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -1301,7 +1301,7 @@ def test_immutable_predicate(self): expected_message_regex = "can't set attribute" hfp = HostFilterPolicy(child_policy=Mock(name='child_policy'), predicate=Mock(name='predicate')) - with self.assertRaisesRegexp(AttributeError, expected_message_regex): + with six.assertRaisesRegex(self, AttributeError, expected_message_regex): hfp.predicate = object() diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index 95a7a12b11..3d6828bdc5 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -14,6 +14,7 @@ import unittest +import six from mock import Mock from cassandra import ProtocolVersion, UnsupportedOperation @@ -172,7 +173,7 @@ def test_keyspace_flag_raises_before_v5(self): keyspace_message = QueryMessage('a', consistency_level=3, keyspace='ks') io = Mock(name='io') - with self.assertRaisesRegexp(UnsupportedOperation, 'Keyspaces.*set'): + with six.assertRaisesRegex(self, UnsupportedOperation, 'Keyspaces.*set'): keyspace_message.send_body(io, protocol_version=4) io.assert_not_called() diff --git 
a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index dbd8764ad9..a9c05976e0 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -16,6 +16,8 @@ from collections import deque from threading import RLock + +import six from mock import Mock, MagicMock, ANY from cassandra import ConsistencyLevel, Unavailable, SchemaTargetType, SchemaChangeType, OperationTimedOut @@ -158,7 +160,7 @@ def test_heartbeat_defunct_deadlock(self): # Simulate ResponseFuture timing out rf._on_timeout() - self.assertRaisesRegexp(OperationTimedOut, "Connection defunct by heartbeat", rf.result) + six.assertRaisesRegex(self, OperationTimedOut, "Connection defunct by heartbeat", rf.result) def test_read_timeout_error_message(self): session = self.make_session() diff --git a/tests/unit/test_timestamps.py b/tests/unit/test_timestamps.py index 58958cff03..fc1be071ad 100644 --- a/tests/unit/test_timestamps.py +++ b/tests/unit/test_timestamps.py @@ -15,6 +15,7 @@ import unittest import mock +import six from cassandra import timestamps from threading import Thread, Lock @@ -105,7 +106,7 @@ def assertLastCallArgRegex(self, call, pattern): last_warn_args, last_warn_kwargs = call self.assertEqual(len(last_warn_args), 1) self.assertEqual(len(last_warn_kwargs), 0) - self.assertRegexpMatches( + six.assertRegex(self, last_warn_args[0], pattern, ) From 6111c0c2d850433552ba87aa61cdda3c88d139eb Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Thu, 9 Mar 2023 16:24:58 -0600 Subject: [PATCH 119/551] Remove references to unsupported Python versions from setup.py --- setup.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/setup.py b/setup.py index aaaa1b4d2d..056469aca6 100644 --- a/setup.py +++ b/setup.py @@ -37,8 +37,6 @@ DistutilsExecError) from distutils.cmd import Command -PY3 = sys.version_info[0] == 3 - try: import subprocess has_subprocess = True @@ -406,9 +404,6 @@ def run_setup(extensions): dependencies = ['six >=1.9', 'geomet>=0.1,<0.3'] - if not PY3: - dependencies.append('futures') - _EXTRAS_REQUIRE = { 'graph': ['gremlinpython==3.4.6'] } @@ -442,9 +437,6 @@ def run_setup(extensions): 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: Implementation :: CPython', From 922d7ad565b4d3b3e0aafd6898c2639d968c6534 Mon Sep 17 00:00:00 2001 From: Bret McGuire Date: Thu, 9 Mar 2023 17:09:13 -0600 Subject: [PATCH 120/551] Minor refactor of prior commit: now that we're dropping 2.7.x support we don't really need to leverage six for unit test functions. 
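To make the rename concrete, here is a minimal self-contained sketch of the
three generations of these assertion spellings; the assertions themselves are
illustrative:

    import unittest

    class RegexAsserts(unittest.TestCase):
        def test_spellings(self):
            # Python 2 era aliases, deprecated since 3.2 and removed in 3.12:
            #     self.assertRegexpMatches(text, pattern)
            #     self.assertRaisesRegexp(exc, pattern, callable)
            # Interim six shims introduced by patch 118:
            #     six.assertRegex(self, text, pattern)
            #     six.assertRaisesRegex(self, exc, pattern, callable)
            # Plain Python 3 spellings used from this patch on:
            self.assertRegex("dse-6.8.30", r"^dse-\d+\.\d+\.\d+$")
            with self.assertRaisesRegex(ValueError, "invalid literal"):
                int("not a number")

    if __name__ == "__main__":
        unittest.main()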
--- tests/integration/advanced/graph/test_graph.py | 2 +- .../cqlengine/management/test_compaction_settings.py | 6 +++--- tests/integration/cqlengine/management/test_management.py | 2 +- .../cqlengine/model/test_class_construction.py | 2 +- tests/integration/cqlengine/test_batch_query.py | 2 +- tests/integration/long/test_ipv6.py | 4 ++-- tests/integration/simulacron/test_connection.py | 2 +- tests/integration/standard/test_authentication.py | 8 ++++---- tests/integration/standard/test_client_warnings.py | 8 ++++---- tests/integration/standard/test_cluster.py | 6 +++--- tests/integration/standard/test_metadata.py | 6 +++--- tests/integration/standard/test_types.py | 2 +- tests/unit/advanced/test_graph.py | 2 +- tests/unit/cqlengine/test_connection.py | 4 ++-- tests/unit/test_connection.py | 2 +- tests/unit/test_policies.py | 2 +- tests/unit/test_protocol.py | 2 +- tests/unit/test_response_future.py | 2 +- 18 files changed, 32 insertions(+), 32 deletions(-) diff --git a/tests/integration/advanced/graph/test_graph.py b/tests/integration/advanced/graph/test_graph.py index a0b6534c34..277283ea5a 100644 --- a/tests/integration/advanced/graph/test_graph.py +++ b/tests/integration/advanced/graph/test_graph.py @@ -266,6 +266,6 @@ def test_graph_protocol_default_for_core_fallback_to_graphson1_if_no_graph_name( self.assertEqual(ep.row_factory, graph_object_row_factory) regex = re.compile(".*Variable.*is unknown.*", re.S) - with six.assertRaisesRegex(self, SyntaxException, regex): + with self.assertRaisesRegex(SyntaxException, regex): self.execute_graph_queries(CoreGraphSchema.fixtures.classic(), execution_profile=ep, verify_graphson=GraphProtocol.GRAPHSON_1_0) diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index 673bda29a7..604e225586 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -83,7 +83,7 @@ def test_alter_actually_alters(self): table_meta = _get_table_metadata(tmp) - six.assertRegex(self, table_meta.export_as_string(), '.*SizeTieredCompactionStrategy.*') + self.assertRegex(table_meta.export_as_string(), '.*SizeTieredCompactionStrategy.*') def test_alter_options(self): @@ -97,11 +97,11 @@ class AlterTable(Model): drop_table(AlterTable) sync_table(AlterTable) table_meta = _get_table_metadata(AlterTable) - six.assertRegex(self, table_meta.export_as_string(), ".*'sstable_size_in_mb': '64'.*") + self.assertRegex(table_meta.export_as_string(), ".*'sstable_size_in_mb': '64'.*") AlterTable.__options__['compaction']['sstable_size_in_mb'] = '128' sync_table(AlterTable) table_meta = _get_table_metadata(AlterTable) - six.assertRegex(self, table_meta.export_as_string(), ".*'sstable_size_in_mb': '128'.*") + self.assertRegex(table_meta.export_as_string(), ".*'sstable_size_in_mb': '128'.*") class OptionsTest(BaseCassEngTestCase): diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index 67f87b10e4..2fd35b865e 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -262,7 +262,7 @@ def test_bogus_option_update(self): option = 'no way will this ever be an option' try: ModelWithTableProperties.__options__[option] = 'what was I thinking?' 
- six.assertRaisesRegex(self, KeyError, "Invalid table option.*%s.*" % option, sync_table, ModelWithTableProperties) + self.assertRaisesRegex(KeyError, "Invalid table option.*%s.*" % option, sync_table, ModelWithTableProperties) finally: ModelWithTableProperties.__options__.pop(option, None) diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index 95ba1f49bd..f764e78e5c 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -92,7 +92,7 @@ def test_attempting_to_make_duplicate_column_names_fails(self): Tests that trying to create conflicting db column names will fail """ - with six.assertRaisesRegex(self, ModelException, r".*more than once$"): + with self.assertRaisesRegex(ModelException, r".*more than once$"): class BadNames(Model): words = columns.Text(primary_key=True) content = columns.Text(db_field='words') diff --git a/tests/integration/cqlengine/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py index 07ee2e13bf..94496727a7 100644 --- a/tests/integration/cqlengine/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -224,7 +224,7 @@ def my_callback(*args, **kwargs): batch.execute() batch.execute() self.assertEqual(len(w), 2) # package filter setup to warn always - six.assertRegex(self, str(w[0].message), r"^Batch.*multiple.*") + self.assertRegex(str(w[0].message), r"^Batch.*multiple.*") def test_disable_multiple_callback_warning(self): """ diff --git a/tests/integration/long/test_ipv6.py b/tests/integration/long/test_ipv6.py index 6c7d447dfb..3e2f2ffc5e 100644 --- a/tests/integration/long/test_ipv6.py +++ b/tests/integration/long/test_ipv6.py @@ -83,7 +83,7 @@ def test_connect(self): def test_error(self): cluster = TestCluster(connection_class=self.connection_class, contact_points=['::1'], port=9043, connect_timeout=10) - six.assertRaisesRegex(self, NoHostAvailable, '\(\'Unable to connect.*%s.*::1\', 9043.*Connection refused.*' + self.assertRaisesRegex(NoHostAvailable, '\(\'Unable to connect.*%s.*::1\', 9043.*Connection refused.*' % errno.ECONNREFUSED, cluster.connect) def test_error_multiple(self): @@ -91,7 +91,7 @@ def test_error_multiple(self): raise unittest.SkipTest('localhost only resolves one address') cluster = TestCluster(connection_class=self.connection_class, contact_points=['localhost'], port=9043, connect_timeout=10) - six.assertRaisesRegex(self, NoHostAvailable, '\(\'Unable to connect.*Tried connecting to \[\(.*\(.*\].*Last error', + self.assertRaisesRegex(NoHostAvailable, '\(\'Unable to connect.*Tried connecting to \[\(.*\(.*\].*Last error', cluster.connect) diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index e34e69f458..1def601d2e 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -263,7 +263,7 @@ def connection_factory(self, *args, **kwargs): prime_request(PrimeOptions(then={"result": "no_result", "delay_in_ms": never})) prime_request(RejectConnections("unbind")) - six.assertRaisesRegex(self, OperationTimedOut, "Connection defunct by heartbeat", future.result) + self.assertRaisesRegex(OperationTimedOut, "Connection defunct by heartbeat", future.result) def test_close_when_query(self): """ diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 
c23c9eedf2..2f8ffbb068 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -122,7 +122,7 @@ def test_auth_connect(self): def test_connect_wrong_pwd(self): cluster = self.cluster_as('cassandra', 'wrong_pass') try: - six.assertRaisesRegex(self, NoHostAvailable, + self.assertRaisesRegex(NoHostAvailable, '.*AuthenticationFailed.', cluster.connect) assert_quiescent_pool_state(self, cluster) @@ -132,7 +132,7 @@ def test_connect_wrong_pwd(self): def test_connect_wrong_username(self): cluster = self.cluster_as('wrong_user', 'cassandra') try: - six.assertRaisesRegex(self, NoHostAvailable, + self.assertRaisesRegex(NoHostAvailable, '.*AuthenticationFailed.*', cluster.connect) assert_quiescent_pool_state(self, cluster) @@ -142,7 +142,7 @@ def test_connect_wrong_username(self): def test_connect_empty_pwd(self): cluster = self.cluster_as('Cassandra', '') try: - six.assertRaisesRegex(self, NoHostAvailable, + self.assertRaisesRegex(NoHostAvailable, '.*AuthenticationFailed.*', cluster.connect) assert_quiescent_pool_state(self, cluster) @@ -152,7 +152,7 @@ def test_connect_empty_pwd(self): def test_connect_no_auth_provider(self): cluster = TestCluster() try: - six.assertRaisesRegex(self, NoHostAvailable, + self.assertRaisesRegex(NoHostAvailable, '.*AuthenticationFailed.*', cluster.connect) assert_quiescent_pool_state(self, cluster) diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index 166f172a16..37003d5213 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -71,7 +71,7 @@ def test_warning_basic(self): future = self.session.execute_async(self.warn_batch) future.result() self.assertEqual(len(future.warnings), 1) - six.assertRegex(self, future.warnings[0], 'Batch.*exceeding.*') + self.assertRegex(future.warnings[0], 'Batch.*exceeding.*') def test_warning_with_trace(self): """ @@ -87,7 +87,7 @@ def test_warning_with_trace(self): future = self.session.execute_async(self.warn_batch, trace=True) future.result() self.assertEqual(len(future.warnings), 1) - six.assertRegex(self, future.warnings[0], 'Batch.*exceeding.*') + self.assertRegex(future.warnings[0], 'Batch.*exceeding.*') self.assertIsNotNone(future.get_query_trace()) @local @@ -106,7 +106,7 @@ def test_warning_with_custom_payload(self): future = self.session.execute_async(self.warn_batch, custom_payload=payload) future.result() self.assertEqual(len(future.warnings), 1) - six.assertRegex(self, future.warnings[0], 'Batch.*exceeding.*') + self.assertRegex(future.warnings[0], 'Batch.*exceeding.*') self.assertDictEqual(future.custom_payload, payload) @local @@ -125,6 +125,6 @@ def test_warning_with_trace_and_custom_payload(self): future = self.session.execute_async(self.warn_batch, trace=True, custom_payload=payload) future.result() self.assertEqual(len(future.warnings), 1) - six.assertRegex(self, future.warnings[0], 'Batch.*exceeding.*') + self.assertRegex(future.warnings[0], 'Batch.*exceeding.*') self.assertIsNotNone(future.get_query_trace()) self.assertDictEqual(future.custom_payload, payload) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index c5f64f6c28..ae6e3e5a4e 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -148,7 +148,7 @@ def test_raise_error_on_control_connection_timeout(self): get_node(1).pause() cluster = 
TestCluster(contact_points=['127.0.0.1'], connect_timeout=1) - with six.assertRaisesRegex(self, NoHostAvailable, "OperationTimedOut\('errors=Timed out creating connection \(1 seconds\)"): + with self.assertRaisesRegex(NoHostAvailable, "OperationTimedOut\('errors=Timed out creating connection \(1 seconds\)"): cluster.connect() cluster.shutdown() @@ -536,7 +536,7 @@ def patched_wait_for_responses(*args, **kwargs): # cluster agreement wait used for refresh original_meta = c.metadata.keyspaces start_time = time.time() - six.assertRaisesRegex(self, Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata) + self.assertRaisesRegex(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata) end_time = time.time() self.assertGreaterEqual(end_time - start_time, agreement_timeout) self.assertIs(original_meta, c.metadata.keyspaces) @@ -573,7 +573,7 @@ def patched_wait_for_responses(*args, **kwargs): # refresh wait overrides cluster value original_meta = c.metadata.keyspaces start_time = time.time() - six.assertRaisesRegex(self, Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata, + self.assertRaisesRegex(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata, max_schema_agreement_wait=agreement_timeout) end_time = time.time() self.assertGreaterEqual(end_time - start_time, agreement_timeout) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 6f76c2a9b0..b83df22032 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -1590,7 +1590,7 @@ def test_function_no_parameters(self): with self.VerifiedFunction(self, **kwargs) as vf: fn_meta = self.keyspace_function_meta[vf.signature] - six.assertRegex(self, fn_meta.as_cql_query(), "CREATE FUNCTION.*%s\(\) .*" % kwargs['name']) + self.assertRegex(fn_meta.as_cql_query(), "CREATE FUNCTION.*%s\(\) .*" % kwargs['name']) def test_functions_follow_keyspace_alter(self): """ @@ -1638,12 +1638,12 @@ def test_function_cql_called_on_null(self): kwargs['called_on_null_input'] = True with self.VerifiedFunction(self, **kwargs) as vf: fn_meta = self.keyspace_function_meta[vf.signature] - six.assertRegex(self, fn_meta.as_cql_query(), "CREATE FUNCTION.*\) CALLED ON NULL INPUT RETURNS .*") + self.assertRegex(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) CALLED ON NULL INPUT RETURNS .*") kwargs['called_on_null_input'] = False with self.VerifiedFunction(self, **kwargs) as vf: fn_meta = self.keyspace_function_meta[vf.signature] - six.assertRegex(self, fn_meta.as_cql_query(), "CREATE FUNCTION.*\) RETURNS NULL ON NULL INPUT RETURNS .*") + self.assertRegex(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) RETURNS NULL ON NULL INPUT RETURNS .*") class AggregateMetadata(FunctionTest): diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 828f10b5e2..6e2e9f7328 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -69,7 +69,7 @@ def test_can_insert_blob_type_as_string(self): msg = r'.*Invalid STRING constant \(.*?\) for "b" of type blob.*' else: msg = r'.*Invalid STRING constant \(.*?\) for b of type blob.*' - six.assertRaisesRegex(self, InvalidRequest, msg, s.execute, query, params) + self.assertRaisesRegex(InvalidRequest, msg, s.execute, query, params) return # In python2, with Cassandra < 2.0, we can manually encode the 'byte str' type as hex for insertion in a blob. 
diff --git a/tests/unit/advanced/test_graph.py b/tests/unit/advanced/test_graph.py index 77a920a3bf..a98a48c82f 100644 --- a/tests/unit/advanced/test_graph.py +++ b/tests/unit/advanced/test_graph.py @@ -259,7 +259,7 @@ def test_init_unknown_kwargs(self): with warnings.catch_warnings(record=True) as w: GraphOptions(unknown_param=42) self.assertEqual(len(w), 1) - six.assertRegex(self, str(w[0].message), r"^Unknown keyword.*GraphOptions.*") + self.assertRegex(str(w[0].message), r"^Unknown keyword.*GraphOptions.*") def test_update(self): opts = GraphOptions(**self.api_params) diff --git a/tests/unit/cqlengine/test_connection.py b/tests/unit/cqlengine/test_connection.py index 9c3454796a..962ee06b52 100644 --- a/tests/unit/cqlengine/test_connection.py +++ b/tests/unit/cqlengine/test_connection.py @@ -52,12 +52,12 @@ def test_get_session_fails_without_existing_connection(self): """ Users can't get the default session without having a default connection set. """ - with six.assertRaisesRegex(self, connection.CQLEngineException, self.no_registered_connection_msg): + with self.assertRaisesRegex(connection.CQLEngineException, self.no_registered_connection_msg): connection.get_session(connection=None) def test_get_cluster_fails_without_existing_connection(self): """ Users can't get the default cluster without having a default connection set. """ - with six.assertRaisesRegex(self, connection.CQLEngineException, self.no_registered_connection_msg): + with self.assertRaisesRegex(connection.CQLEngineException, self.no_registered_connection_msg): connection.get_cluster(connection=None) diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index 97faa5e7fc..bc6749a477 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -392,7 +392,7 @@ def send_msg(msg, req_id, msg_callback): connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count) exc = connection.defunct.call_args_list[0][0][0] self.assertIsInstance(exc, ConnectionException) - six.assertRegex(self, exc.args[0], r'^Received unexpected response to OptionsMessage.*') + self.assertRegex(exc.args[0], r'^Received unexpected response to OptionsMessage.*') holder.return_connection.assert_has_calls( [call(connection)] * get_holders.call_count) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index edafb7cb01..a6c63dcfdc 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -1301,7 +1301,7 @@ def test_immutable_predicate(self): expected_message_regex = "can't set attribute" hfp = HostFilterPolicy(child_policy=Mock(name='child_policy'), predicate=Mock(name='predicate')) - with six.assertRaisesRegex(self, AttributeError, expected_message_regex): + with self.assertRaisesRegex(AttributeError, expected_message_regex): hfp.predicate = object() diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index 3d6828bdc5..0f251ffc0e 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -173,7 +173,7 @@ def test_keyspace_flag_raises_before_v5(self): keyspace_message = QueryMessage('a', consistency_level=3, keyspace='ks') io = Mock(name='io') - with six.assertRaisesRegex(self, UnsupportedOperation, 'Keyspaces.*set'): + with self.assertRaisesRegex(UnsupportedOperation, 'Keyspaces.*set'): keyspace_message.send_body(io, protocol_version=4) io.assert_not_called() diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index a9c05976e0..273490072f 100644 --- a/tests/unit/test_response_future.py +++ 
b/tests/unit/test_response_future.py
@@ -160,7 +160,7 @@ def test_heartbeat_defunct_deadlock(self):
         # Simulate ResponseFuture timing out
         rf._on_timeout()
-        six.assertRaisesRegex(self, OperationTimedOut, "Connection defunct by heartbeat", rf.result)
+        self.assertRaisesRegex(OperationTimedOut, "Connection defunct by heartbeat", rf.result)

     def test_read_timeout_error_message(self):
         session = self.make_session()

From bf7abff6e541dd720f739733f897279f29f5b7cd Mon Sep 17 00:00:00 2001
From: Bret McGuire
Date: Mon, 13 Mar 2023 14:19:21 -0500
Subject: [PATCH 121/551] Update Travis config to only run versions that will be supported going forward

--- .travis.yml | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 906775e90c..7e59fa486d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,13 +3,9 @@ sudo: false
 language: python
 python:
-  - "2.7"
-  - "3.5"
-  - "3.6"
   - "3.7"
   - "3.8"
-  - "pypy2.7-6.0"
-  - "pypy3.5"
+  - "pypy3.7"

 env:
   - CASS_DRIVER_NO_CYTHON=1

From ee3f3af95cdd66075aa4da2da71a92caad74165a Mon Sep 17 00:00:00 2001
From: Bret McGuire
Date: Mon, 13 Mar 2023 14:45:55 -0500
Subject: [PATCH 122/551] Trying to get to a maximal working PyPy version. Have to go back to 3.6, which isn't ideal...

--- .travis.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.travis.yml b/.travis.yml
index 7e59fa486d..9f9c450a77 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,7 +5,7 @@ language: python
 python:
   - "3.7"
   - "3.8"
-  - "pypy3.7"
+  - "pypy3.6"

 env:
   - CASS_DRIVER_NO_CYTHON=1

From fa9b7af85bf07c381ff955790faa3ec7d13c7713 Mon Sep 17 00:00:00 2001
From: Bret McGuire
Date: Mon, 13 Mar 2023 14:48:45 -0500
Subject: [PATCH 123/551] Forgot to add the complete version suffix

--- .travis.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.travis.yml b/.travis.yml
index 9f9c450a77..54d3a6c89c 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,7 +5,7 @@ language: python
 python:
   - "3.7"
   - "3.8"
-  - "pypy3.6"
+  - "pypy3.6-7.0.0"

 env:
   - CASS_DRIVER_NO_CYTHON=1

From fdac31e5c93f867166861e1573ee41b00f3e78f7 Mon Sep 17 00:00:00 2001
From: Bret McGuire
Date: Mon, 13 Mar 2023 15:52:34 -0500
Subject: [PATCH 124/551] Going back to known good non-2.7 PyPy target. PYTHON-1333 has more detail.
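When juggling pinned interpreter labels like the ones above, it helps to confirm in the CI log which interpreter Travis actually provisioned before trusting a green or red build. A minimal, standard-library-only check (illustrative, not part of these patches) that could be dropped into a build step:

    import platform
    import sys

    # Prints e.g. "PyPy 3.5.3" or "CPython 3.8.16", showing what the
    # `python:` matrix entry actually resolved to on this worker.
    print(platform.python_implementation(), platform.python_version())
    print(sys.version)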
--- .travis.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.travis.yml b/.travis.yml
index 54d3a6c89c..4d94d86087 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,7 +5,7 @@ language: python
 python:
   - "3.7"
   - "3.8"
-  - "pypy3.6-7.0.0"
+  - "pypy3.5"

 env:
   - CASS_DRIVER_NO_CYTHON=1

From f5001b8759d34896a1899b791a5ca57db8ba8069 Mon Sep 17 00:00:00 2001
From: Bret McGuire
Date: Mon, 13 Mar 2023 16:57:23 -0500
Subject: [PATCH 125/551] Release 3.26: changelog & version

--- CHANGELOG.rst | 33 +++++++++++++++++++++++++++++++++
 cassandra/__init__.py | 2 +-
 2 files changed, 34 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index d2d577c957..fc7a702534 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,3 +1,36 @@
+3.26.0
+======
+March 13, 2023
+
+Features
+--------
+* Add support for execution profiles in execute_concurrent (PR 1122)
+
+Bug Fixes
+---------
+* Handle empty non-final result pages (PR 1110)
+* Do not re-use stream IDs for in-flight requests (PR 1114)
+* Asyncore race condition causes logging exception on shutdown (PYTHON-1266)
+
+Others
+------
+* Fix deprecation warning in query tracing (PR 1103)
+* Remove mutable default values from some tests (PR 1116)
+* Remove dependency on unittest2 (PYTHON-1289)
+* Fix deprecation warnings for asyncio.coroutine annotation in asyncioreactor (PYTHON-1290)
+* Fix typos in source files (PR 1126)
+* HostFilterPolicyInitTest fix for Python 3.11 (PR 1131)
+* Fix for DontPrepareOnIgnoredHostsTest (PYTHON-1287)
+* tests.integration.simulacron.test_connection failures (PYTHON-1304)
+* tests.integration.standard.test_single_interface.py appears to be failing for C* 4.0 (PYTHON-1329)
+* Authentication tests appear to be failing fraudulently (PYTHON-1328)
+* PreparedStatementTests.test_fail_if_different_query_id_on_reprepare() failing unexpectedly (PYTHON-1327)
+* Refactor deprecated unittest aliases for Python 3.11 compatibility (PR 1112)
+
+Deprecations
+------------
+* This release removes support for Python 2.7.x as well as Python 3.5.x and 3.6.x
+
 3.25.0
 ======
 March 18, 2021

diff --git a/cassandra/__init__.py b/cassandra/__init__.py
index e6cb5c55bb..e14f20c6ed 100644
--- a/cassandra/__init__.py
+++ b/cassandra/__init__.py
@@ -22,7 +22,7 @@ def emit(self, record):
 logging.getLogger('cassandra').addHandler(NullHandler())

-__version_info__ = (3, 25, 0)
+__version_info__ = (3, 26, 0)
 __version__ = '.'.join(map(str, __version_info__))

From d5b13c44d1341ac3aa7e983d893e5fb997dfe23d Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Tue, 21 Mar 2023 20:52:37 +0200
Subject: [PATCH 126/551] Fix ScyllaCloudConfigTests that was failing with missing argument

scylladb/scylla-ccm#441 introduced a change to a function signature, and the test was failing since it wasn't passing down the nodes_info parameter.
Ref: https://github.com/scylladb/scylla-ccm/pull/441 --- tests/integration/standard/test_scylla_cloud.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_scylla_cloud.py b/tests/integration/standard/test_scylla_cloud.py index 422a66f318..94fb07290e 100644 --- a/tests/integration/standard/test_scylla_cloud.py +++ b/tests/integration/standard/test_scylla_cloud.py @@ -46,7 +46,8 @@ def start_cluster_with_proxy(self): ccm_cluster._update_config() config_data_yaml, config_path_yaml = create_cloud_config(ccm_cluster.get_path(), - port=listen_port, address=listen_address) + port=listen_port, address=listen_address, + nodes_info=nodes_info) return config_data_yaml, config_path_yaml def test_1_node_cluster(self): From d1f4d67f662d9230913fd9639a4c8aebf6dd154b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Tue, 21 Mar 2023 15:29:35 +0100 Subject: [PATCH 127/551] Drop Python2 support - Remove Python 2 CI - Update list of supported versions in README.rst - Remove unnecessary entries in requirements --- .../workflows/integration-tests-python2.yml | 25 ------------ .github/workflows/test-python2.yaml | 40 ------------------- README.rst | 2 +- requirements.txt | 5 --- test-requirements.txt | 9 ++--- 5 files changed, 4 insertions(+), 77 deletions(-) delete mode 100644 .github/workflows/integration-tests-python2.yml delete mode 100644 .github/workflows/test-python2.yaml diff --git a/.github/workflows/integration-tests-python2.yml b/.github/workflows/integration-tests-python2.yml deleted file mode 100644 index bdcc878d15..0000000000 --- a/.github/workflows/integration-tests-python2.yml +++ /dev/null @@ -1,25 +0,0 @@ -name: Integration tests Python2 - -on: - pull_request: - branches: - - master - push: - branches: - - master - -jobs: - tests: - runs-on: ubuntu-20.04 - if: "!contains(github.event.pull_request.labels.*.name, 'disable-integration-tests')" - steps: - - uses: actions/checkout@v2 - - name: Install Python2.7 - uses: actions/setup-python@v4 - with: - python-version: 2.7 - - - name: Test with pytest - run: | - ./ci/run_integration_test.sh tests/integration/standard/test_authentication.py tests/integration/standard/test_cluster.py tests/integration/standard/test_concurrent.py tests/integration/standard/test_connection.py tests/integration/standard/test_control_connection.py tests/integration/standard/test_custom_payload.py tests/integration/standard/test_custom_protocol_handler.py tests/integration/standard/test_cython_protocol_handlers.py tests/integration/standard/test_scylla_cloud.py tests/integration/standard/test_use_keyspace.py tests/integration/standard/test_ip_change.py tests/integration/cqlengine/ - # can't run this, cause only 2 cpus on github actions: tests/integration/standard/test_shard_aware.py diff --git a/.github/workflows/test-python2.yaml b/.github/workflows/test-python2.yaml deleted file mode 100644 index da51f8e169..0000000000 --- a/.github/workflows/test-python2.yaml +++ /dev/null @@ -1,40 +0,0 @@ -name: Build and test python2 - -on: [push, pull_request] - -jobs: - test: - name: Test on python2 - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v3 - - - uses: actions/setup-python@v4 - name: Install Python2.7 - with: - python-version: '2.7' - - name: Run unittests - run: |- - pip install -r ./test-requirements.txt - pytest --import-mode append ./tests/unit -k 'not (test_connection_initialization or test_cloud)' - EVENT_LOOP_MANAGER=gevent pytest --import-mode append 
./tests/unit/io/test_geventreactor.py - EVENT_LOOP_MANAGER=eventlet pytest --import-mode append ./tests/unit/io/test_eventletreactor.py - - build: - name: Build source/wheel distribution for python2 - if: "(!contains(github.event.pull_request.labels.*.name, 'disable-test-build'))|| github.event_name == 'push' && endsWith(github.event.ref, 'scylla')" - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v3 - - - uses: actions/setup-python@v4 - name: Install Python2.7 - with: - python-version: '2.7' - - - name: Build sdist - run: python setup.py sdist - - - uses: actions/upload-artifact@v2 - with: - path: dist/*.tar.gz diff --git a/README.rst b/README.rst index eaf5106c8d..643272cbf4 100644 --- a/README.rst +++ b/README.rst @@ -10,7 +10,7 @@ Scylla Enterprise (2018.1.x+) using exclusively Cassandra's binary protocol and .. image:: https://github.com/scylladb/python-driver/workflows/CI%20Docs/badge.svg?tag=*-scylla :target: https://github.com/scylladb/python-driver/actions?query=workflow%3A%22CI+Docs%22+event%3Apush+branch%3A*-scylla -The driver supports Python versions 2.7, 3.4, 3.5, 3.6, 3.7 and 3.8. +The driver supports Python versions 3.6-3.11. .. **Note:** This driver does not support big-endian systems. diff --git a/requirements.txt b/requirements.txt index 28a897b034..732bba1018 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,2 @@ geomet>=0.1,<0.3 six >=1.9 -futures==3.4.0; python_version < '3.0.0' -# Futures is not required for Python 3, but it works up through 2.2.0 (after which it introduced breaking syntax). -# This is left here to make sure install -r works with any runtime. When installing via setup.py, futures is omitted -# for Python 3, in favor of the standard library implementation. -# see PYTHON-393 diff --git a/test-requirements.txt b/test-requirements.txt index 887af99f9d..780fa89e18 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -5,16 +5,13 @@ mock>1.1 pytz sure pure-sasl -twisted[tls]; python_version >= '3.5' or python_version < '3.0' -twisted[tls]==19.2.1; python_version < '3.5' and python_version >= '3.0' +twisted[tls]; python_version >= '3.5' +twisted[tls]==19.2.1; python_version < '3.5' gevent>=1.0; platform_machine != 'i686' and platform_machine != 'win32' gevent==20.5.0; platform_machine == 'i686' or platform_machine == 'win32' eventlet>=0.33.3 -cython>=0.20,<0.30 ; python_version > '3.0' -cython==0.23.1 ; python_version < '3.0' +cython>=0.20,<0.30 packaging -backports.ssl_match_hostname; python_version < '2.7.9' futurist; python_version >= '3.7' asynctest; python_version >= '3.5' -ipaddress; python_version < '3.3.0' pyyaml From b363b494446d7c3cb96bfe532a9836741c7777f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Tue, 21 Mar 2023 15:30:51 +0100 Subject: [PATCH 128/551] Remove some Python2 compatibility workarounds Reverts some workarounds introduced in https://github.com/scylladb/python-driver/pull/176 --- cassandra/pool.py | 6 +----- cassandra/scylla/cloud.py | 14 ++++++-------- 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/cassandra/pool.py b/cassandra/pool.py index e310cb39e7..99d0050488 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -15,8 +15,6 @@ """ Connection pooling and host management. 
""" -from __future__ import absolute_import - from concurrent.futures import Future from functools import total_ordering import logging @@ -1198,9 +1196,7 @@ def shutdown(self): with self._lock: connections_to_close.extend(self._connections) self.open_count -= len(self._connections) - # After dropping support for Python 2 we can again use list.clear() - # self._connections.clear() - del self._connections[:] + self._connections.clear() connections_to_close.extend(self._trash) self._trash.clear() diff --git a/cassandra/scylla/cloud.py b/cassandra/scylla/cloud.py index 9ba898ba3b..40ef439aaf 100644 --- a/cassandra/scylla/cloud.py +++ b/cassandra/scylla/cloud.py @@ -52,14 +52,12 @@ def nth(iterable, n, default=None): class CloudConfiguration: - # Commented out because this syntax doesn't work with Python2 - # Can be restores after dropping support for Python2 - # endpoint_factory: SniEndPointFactory - # contact_points: list - # auth_provider: AuthProvider = None - # ssl_options: dict - # ssl_context: SSLContext - # skip_tls_verify: bool + endpoint_factory: SniEndPointFactory + contact_points: list + auth_provider: AuthProvider = None + ssl_options: dict + ssl_context: SSLContext + skip_tls_verify: bool def __init__(self, configuration_file, pyopenssl=False, endpoint_factory=None): cloud_config = yaml.safe_load(open(configuration_file)) From 08e51352aca5d76c5ec2e68389b22109fbe7e2c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Mon, 27 Mar 2023 15:45:14 +0200 Subject: [PATCH 129/551] Fix failing test Recently ccm changed default smp from 1 to 2. This caused 1 of integration tests to fail. Specify smp manually to fix the failing test. --- tests/integration/standard/test_cluster.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index e6d2484a7d..86fb26e962 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -24,6 +24,7 @@ import logging import warnings from packaging.version import Version +import os import cassandra from cassandra.cluster import NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT, ControlConnection, Cluster @@ -50,6 +51,7 @@ def setup_module(): + os.environ['SCYLLA_EXT_OPTS'] = "--smp 1" use_singledc() warnings.simplefilter("always") From f5c34f0bda4291dce701596d7816c43da9314a58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Tue, 17 Jan 2023 15:43:25 +0100 Subject: [PATCH 130/551] Fix wait_for_schema_agreement deadlock Should fix issue scylladb#168. See the issue description for more detailed description of the bug. Fix works by creating a new version of wait_for_schema_agreement, called _wait_for_schema_agreement_async that schedules new task for each iteration of loop, instead of sleeping. That way, thread executor can run other functions instead of being stuck with wait_for_schema_agreement, allowing on_down notification to be handled and node registered as down, which in turn allows for schema agreement wait to finish. Before fix (steps are different that those described in the issue, as the issue was partially incorrect): 1. The driver has control_connection established to node A. 2. We kill a node B forcefully. 3. Then we immediately schedule a schema change on A. 4. A sends a notification to the driver. 5. The driver schedules wait_for_schema_agreement tasks in the executor. 6. 
Wait_for_schema_agreement gets stuck because A has a different schema version than B and B is considered up by the driver. 7. Eventually, driver notices that B is down. 8. The driver submits the on_down task, but there are no available threads in the pool, so we don't set is_up = False for B. 9. wait_for_schema_agreeement never finishes (until timeout). on_down is never executed. We've deadlocked. After the fix: 1. The driver has control_connection established to node A. 2. We kill a node B forcefully. 3. Then we immediately schedule a schema change on A. 4. A sends a notification to the driver. 5. The driver starts _wait_for_schema_agreement_async. 6. _wait_for_schema_agreement_async::inner() is executed in an interval. It continues to be scheduled because A has a different schema version than B and B is considered up by the driver, so it can't finish. 7. Eventually, driver notices that B is down. 8. The driver submits the on_down task. 9. The task is executed by the thread pool. is_up = False is set for B. 9. _wait_for_schema_agreement_async::inner() ceases to be scheduled, callback is called. --- cassandra/cluster.py | 134 +++++++++++++++++++++++++- tests/unit/test_control_connection.py | 6 +- 2 files changed, 136 insertions(+), 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 6385387ed1..2007cb5ae7 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3783,6 +3783,138 @@ def _refresh_schema(self, connection, preloaded_results=None, schema_agreement_w self._cluster.metadata.refresh(connection, self._timeout, fetch_size=self._schema_meta_page_size, **kwargs) return True + + # Three functions below (_refresh_schema_async, _refresh_schema_async_inner, _wait_for_schema_agreement_async) are async + # versions of the functions without _async in name - instead of blocking and returning result, their first argument + # is a callback that will receive either a result or an exception. + # Purpose of those functions is to avoid filling whole thread pool and deadlocking. 
+ def _refresh_schema_async(self, callback, force=False, **kwargs): + def new_callback(e): + if isinstance(e, ReferenceError): + # our weak reference to the Cluster is no good + callback(False) + return + elif isinstance(e, Exception): + log.debug("[control connection] Error refreshing schema", exc_info=True) + self._signal_error() + callback(False) + return + else: + callback(e) + if self._connection: + self._refresh_schema_async_inner(new_callback, self._connection, force=force, **kwargs) + else: + callback(False) + + def _refresh_schema_async_inner(self, callback, connection, preloaded_results=None, schema_agreement_wait=None, force=False, **kwargs): + if self._cluster.is_shutdown: + callback(False) + return + + def new_callback(e): + if not self._schema_meta_enabled and not force: + log.debug("[control connection] Skipping schema refresh because schema metadata is disabled") + callback(False) + return + + if not e: + log.debug("Skipping schema refresh due to lack of schema agreement") + callback(False) + return + self._cluster.metadata.refresh(connection, self._timeout, fetch_size=self._schema_meta_page_size, **kwargs) + + self._wait_for_schema_agreement_async(new_callback, + connection=self._connection, + preloaded_results=preloaded_results, + wait_time=schema_agreement_wait) + + # INTENDED ONLY FOR INTERNAL USE + def _wait_for_schema_agreement_async(self, callback, connection=None, preloaded_results=None, wait_time=None): + total_timeout = wait_time if wait_time is not None else self._cluster.max_schema_agreement_wait + if total_timeout <= 0: + callback(True) + return + + # Each schema change typically generates two schema refreshes, one + # from the response type and one from the pushed notification. Holding + # a lock is just a simple way to cut down on the number of schema queries + # we'll make. 
+ if not self._schema_agreement_lock.acquire(blocking=False): + self._cluster.scheduler.schedule_unique(0.2, self._wait_for_schema_agreement_async, callback, connection, preloaded_results, wait_time) + return + + try: + if self._is_shutdown: + self._schema_agreement_lock.release() + callback(None) + return + + if not connection: + connection = self._connection + + if preloaded_results: + log.debug("[control connection] Attempting to use preloaded results for schema agreement") + + peers_result = preloaded_results[0] + local_result = preloaded_results[1] + schema_mismatches = self._get_schema_mismatches(peers_result, local_result, connection.endpoint) + if schema_mismatches is None: + self._schema_agreement_lock.release() + callback(True) + return + + log.debug("[control connection] Waiting for schema agreement") + start = self._time.time() + elapsed = 0 + cl = ConsistencyLevel.ONE + schema_mismatches = None + select_peers_query = self._get_peers_query(self.PeersQueryType.PEERS_SCHEMA, connection) + except Exception as e: + self._schema_agreement_lock.release() + callback(e) + return + + def inner(first_iter): + try: + elapsed = self._time.time() - start + if elapsed < total_timeout or first_iter: + peers_query = QueryMessage(query=select_peers_query, consistency_level=cl) + local_query = QueryMessage(query=self._SELECT_SCHEMA_LOCAL, consistency_level=cl) + try: + timeout = min(self._timeout, total_timeout - elapsed) + peers_result, local_result = connection.wait_for_responses( + peers_query, local_query, timeout=timeout) + except OperationTimedOut as timeout: + log.debug("[control connection] Timed out waiting for " + "response during schema agreement check: %s", timeout) + self._cluster.scheduler.schedule_unique(0.2, inner, False) + return + except ConnectionShutdown as e: + if self._is_shutdown: + log.debug("[control connection] Aborting wait for schema match due to shutdown") + self._schema_agreement_lock.release() + callback(None) + return + else: + raise + + schema_mismatches = self._get_schema_mismatches(peers_result, local_result, connection.endpoint) + if schema_mismatches is None: + self._schema_agreement_lock.release() + callback(True) + return + + log.debug("[control connection] Schemas mismatched, trying again") + self._cluster.scheduler.schedule_unique(0.2, inner, False) + else: + log.warning("Node %s is reporting a schema disagreement: %s", + connection.endpoint, schema_mismatches) + self._schema_agreement_lock.release() + callback(False) + except Exception as e: + self._schema_agreement_lock.release() + callback(e) + inner(True) def refresh_node_list_and_token_map(self, force_token_rebuild=False): try: @@ -4039,7 +4171,7 @@ def _handle_schema_change(self, event): if self._schema_event_refresh_window < 0: return delay = self._delay_for_event_type('schema_change', self._schema_event_refresh_window) - self._cluster.scheduler.schedule_unique(delay, self.refresh_schema, **event) + self._cluster.scheduler.schedule_unique(delay, self._refresh_schema_async, lambda *a, **k: None, **event) def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wait_time=None): diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index a4157fc493..e8bf918f51 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -512,13 +512,13 @@ def test_handle_schema_change(self): } self.cluster.scheduler.reset_mock() self.control_connection._handle_schema_change(event) - 
self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_schema, **event) + self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection._refresh_schema_async, ANY, **event) self.cluster.scheduler.reset_mock() event['target_type'] = SchemaTargetType.KEYSPACE del event['table'] self.control_connection._handle_schema_change(event) - self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_schema, **event) + self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection._refresh_schema_async, ANY, **event) def test_refresh_disabled(self): cluster = MockCluster() @@ -566,7 +566,7 @@ def test_refresh_disabled(self): cc_no_topo_refresh._handle_status_change(status_event) cc_no_topo_refresh._handle_schema_change(schema_event) cluster.scheduler.schedule_unique.assert_has_calls([call(ANY, cc_no_topo_refresh.refresh_node_list_and_token_map), - call(0.0, cc_no_topo_refresh.refresh_schema, + call(0.0, cc_no_topo_refresh._refresh_schema_async, ANY, **schema_event)]) def test_refresh_nodes_and_tokens_add_host_detects_port(self): From 560e195f6c5c5dc4f4b54172c6a5ed79f7bb6fc0 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Mon, 13 Mar 2023 18:07:07 +0000 Subject: [PATCH 131/551] docs: Update theme 1.4.1 --- README.rst | 8 ++--- docs/.nav | 2 +- docs/core_graph.rst | 2 +- .../{third_party.rst => third-party.rst} | 0 .../{upgrade_guide.rst => upgrade-guide.rst} | 0 ...ates_and_times.rst => dates-and-times.rst} | 0 ...on_profiles.rst => execution-profiles.rst} | 0 ...etting_started.rst => getting-started.rst} | 0 docs/graph.rst | 2 +- docs/index.rst | 36 +++++++++---------- docs/{object_mapper.rst => object-mapper.rst} | 8 ++--- docs/pyproject.toml | 2 +- docs/{query_paging.rst => query-paging.rst} | 0 ...erless.rst => scylla-cloud-serverless.rst} | 0 docs/{scylla_cloud.rst => scylla-cloud.rst} | 0 ...cylla_specific.rst => scylla-specific.rst} | 0 ...fined_types.rst => user-defined-types.rst} | 0 17 files changed, 30 insertions(+), 30 deletions(-) rename docs/cqlengine/{third_party.rst => third-party.rst} (100%) rename docs/cqlengine/{upgrade_guide.rst => upgrade-guide.rst} (100%) rename docs/{dates_and_times.rst => dates-and-times.rst} (100%) rename docs/{execution_profiles.rst => execution-profiles.rst} (100%) rename docs/{getting_started.rst => getting-started.rst} (100%) rename docs/{object_mapper.rst => object-mapper.rst} (96%) rename docs/{query_paging.rst => query-paging.rst} (100%) rename docs/{scylla_cloud_serverless.rst => scylla-cloud-serverless.rst} (100%) rename docs/{scylla_cloud.rst => scylla-cloud.rst} (100%) rename docs/{scylla_specific.rst => scylla-specific.rst} (100%) rename docs/{user_defined_types.rst => user-defined-types.rst} (100%) diff --git a/README.rst b/README.rst index 643272cbf4..b1833a8fc5 100644 --- a/README.rst +++ b/README.rst @@ -24,8 +24,8 @@ Features * `Automatic reconnection `_ * Configurable `load balancing `_ and `retry policies `_ * `Concurrent execution utilities `_ -* `Object mapper `_ -* `Shard awareness `_ +* `Object mapper `_ +* `Shard awareness `_ Installation ------------ @@ -43,7 +43,7 @@ The documentation can be found online `here `_ -* `Getting started guide `_ +* `Getting started guide `_ * `API docs `_ * `Performance tips `_ @@ -59,7 +59,7 @@ Object Mapper ------------- cqlengine (originally developed by Blake Eggleston and Jon Haddad, with contributions from the community) is now maintained as an integral 
part of this package. Refer to -`documentation here `_. +`documentation here `_. Contributing ------------ diff --git a/docs/.nav b/docs/.nav index 807bfd3e6f..af49594d99 100644 --- a/docs/.nav +++ b/docs/.nav @@ -13,7 +13,7 @@ query_paging security upgrading user_defined_types -dates_and_times +dates-and-times cloud faq api diff --git a/docs/core_graph.rst b/docs/core_graph.rst index 6a2109d752..c3fa8d8271 100644 --- a/docs/core_graph.rst +++ b/docs/core_graph.rst @@ -13,7 +13,7 @@ The driver defines three Execution Profiles suitable for graph execution: * :data:`~.cluster.EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT` * :data:`~.cluster.EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT` -See :doc:`getting_started` and :doc:`execution_profiles` +See :doc:`getting-started` and :doc:`execution-profiles` for more detail on working with profiles. In DSE 6.8.0, the Core graph engine has been introduced and is now the default. It diff --git a/docs/cqlengine/third_party.rst b/docs/cqlengine/third-party.rst similarity index 100% rename from docs/cqlengine/third_party.rst rename to docs/cqlengine/third-party.rst diff --git a/docs/cqlengine/upgrade_guide.rst b/docs/cqlengine/upgrade-guide.rst similarity index 100% rename from docs/cqlengine/upgrade_guide.rst rename to docs/cqlengine/upgrade-guide.rst diff --git a/docs/dates_and_times.rst b/docs/dates-and-times.rst similarity index 100% rename from docs/dates_and_times.rst rename to docs/dates-and-times.rst diff --git a/docs/execution_profiles.rst b/docs/execution-profiles.rst similarity index 100% rename from docs/execution_profiles.rst rename to docs/execution-profiles.rst diff --git a/docs/getting_started.rst b/docs/getting-started.rst similarity index 100% rename from docs/getting_started.rst rename to docs/getting-started.rst diff --git a/docs/graph.rst b/docs/graph.rst index b0cad4ea36..1b61bbc713 100644 --- a/docs/graph.rst +++ b/docs/graph.rst @@ -13,7 +13,7 @@ The driver defines three Execution Profiles suitable for graph execution: * :data:`~.cluster.EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT` * :data:`~.cluster.EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT` -See :doc:`getting_started` and :doc:`execution_profiles` +See :doc:`getting-started` and :doc:`execution-profiles` for more detail on working with profiles. In DSE 6.8.0, the Core graph engine has been introduced and is now the default. It diff --git a/docs/index.rst b/docs/index.rst index f8c618f837..c21d293b6f 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -17,25 +17,25 @@ Contents :doc:`installation` How to install the driver. -:doc:`getting_started` +:doc:`getting-started` A guide through the first steps of connecting to Scylla and executing queries -:doc:`scylla_specific` +:doc:`scylla-specific` A list of feature available only on ``scylla-driver`` -:doc:`execution_profiles` +:doc:`execution-profiles` An introduction to a more flexible way of configuring request execution :doc:`lwt` Working with results of conditional requests -:doc:`object_mapper` +:doc:`object-mapper` Introduction to the integrated object mapper, cqlengine :doc:`performance` Tips for getting good performance. 
-:doc:`query_paging` +:doc:`query-paging` Notes on paging large query results :doc:`security` @@ -44,16 +44,16 @@ Contents :doc:`upgrading` A guide to upgrading versions of the driver -:doc:`user_defined_types` +:doc:`user-defined-types` Working with Scylla's user-defined types (UDT) -:doc:`dates_and_times` +:doc:`dates-and-times` Some discussion on the driver's approach to working with timestamp, date, time types -:doc:`scylla_cloud` +:doc:`scylla-cloud` Connect to ScyllaDB Cloud -:doc:`scylla_cloud_serverless` +:doc:`scylla-cloud-serverless` Connect to ScyllaDB Cloud Serverless :doc:`CHANGELOG` @@ -70,19 +70,19 @@ Contents api/index installation - getting_started - scylla_specific + getting-started + scylla-specific upgrading - execution_profiles + execution-profiles performance - query_paging + query-paging lwt security - user_defined_types - object_mapper - dates_and_times - scylla_cloud - scylla_cloud_serverless + user-defined-types + object-mapper + dates-and-times + scylla-cloud + scylla-cloud-serverless faq Getting Help diff --git a/docs/object_mapper.rst b/docs/object-mapper.rst similarity index 96% rename from docs/object_mapper.rst rename to docs/object-mapper.rst index 50d3cbf320..421be246ac 100644 --- a/docs/object_mapper.rst +++ b/docs/object-mapper.rst @@ -7,7 +7,7 @@ cqlengine is the Cassandra CQL 3 Object Mapper packaged with this driver Contents -------- -:doc:`cqlengine/upgrade_guide` +:doc:`cqlengine/upgrade-guide` For migrating projects from legacy cqlengine, to the integrated product :doc:`cqlengine/models` @@ -25,7 +25,7 @@ Contents :ref:`API Documentation ` Index of API documentation -:doc:`cqlengine/third_party` +:doc:`cqlengine/third-party` High-level examples in Celery and uWSGI :doc:`cqlengine/faq` @@ -33,12 +33,12 @@ Contents .. toctree:: :hidden: - cqlengine/upgrade_guide + cqlengine/upgrade-guide cqlengine/models cqlengine/queryset cqlengine/batches cqlengine/connections - cqlengine/third_party + cqlengine/third-party cqlengine/faq .. 
_getting-started: diff --git a/docs/pyproject.toml b/docs/pyproject.toml index 4cff92ee70..4bca5f9db5 100644 --- a/docs/pyproject.toml +++ b/docs/pyproject.toml @@ -18,7 +18,7 @@ recommonmark = "0.7.1" redirects_cli ="~0.1.2" sphinx-autobuild = "2021.3.14" sphinx-sitemap = "2.1.0" -sphinx-scylladb-theme = "~1.3.1" +sphinx-scylladb-theme = "~1.4.1" sphinx-multiversion-scylla = "~0.2.11" Sphinx = "4.3.2" scales = "1.0.9" diff --git a/docs/query_paging.rst b/docs/query-paging.rst similarity index 100% rename from docs/query_paging.rst rename to docs/query-paging.rst diff --git a/docs/scylla_cloud_serverless.rst b/docs/scylla-cloud-serverless.rst similarity index 100% rename from docs/scylla_cloud_serverless.rst rename to docs/scylla-cloud-serverless.rst diff --git a/docs/scylla_cloud.rst b/docs/scylla-cloud.rst similarity index 100% rename from docs/scylla_cloud.rst rename to docs/scylla-cloud.rst diff --git a/docs/scylla_specific.rst b/docs/scylla-specific.rst similarity index 100% rename from docs/scylla_specific.rst rename to docs/scylla-specific.rst diff --git a/docs/user_defined_types.rst b/docs/user-defined-types.rst similarity index 100% rename from docs/user_defined_types.rst rename to docs/user-defined-types.rst From 8ec023a8c373060c891b3708966396d7f88cfd3a Mon Sep 17 00:00:00 2001 From: David Garcia Date: Tue, 14 Mar 2023 09:40:09 +0000 Subject: [PATCH 132/551] docs: Add poetry.lock to make clean --- docs/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/Makefile b/docs/Makefile index 93317e21fe..99b2a0f2a8 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -40,6 +40,7 @@ pristine: clean .PHONY: clean clean: rm -rf $(BUILDDIR)/* + rm -f poetry.lock # Generate output commands .PHONY: dirhtml From bea2d23b0c43b725745cd6f048c8ce1c8ade3284 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Fri, 31 Mar 2023 09:04:13 +0100 Subject: [PATCH 133/551] doc: Update README --- README-dev.rst | 32 ++++---------------------------- 1 file changed, 4 insertions(+), 28 deletions(-) diff --git a/README-dev.rst b/README-dev.rst index b9de2eebce..e49ec80204 100644 --- a/README-dev.rst +++ b/README-dev.rst @@ -58,35 +58,11 @@ Releasing Building the Docs ================= -*Note*: The docs build instructions have been tested with Sphinx 2.4.4 and Fedora 32. +To build and preview the documentation for the ScyllaDB Python driver locally, you must first manually install `python-driver`. +This is necessary for autogenerating the reference documentation of the driver. +You can find detailed instructions on how to install the driver in the `Installation guide `_. -To build and preview the theme locally, you will need to install the following software: - -- `Git `_ -- `Python 3.7 `_ -- `pip `_ - -Run the following command to build the docs. - -.. code:: console - - cd docs - make preview - -Once the command completes processing, open http://127.0.0.1:5500/ with your preferred browser. - -Building multiple documentation versions -======================================== - -Build docs for all the versions. - -``` -cd docs -make multiversion -``` - Then, open ``docs/_build/dirhtml//index.html`` with your preferred browser. - -**NOTE:** If you only can see docs generated for the master branch, try to run ``git fetch --tags`` to download the latest tags from remote. +After installing the driver, you can build and preview the documentation by following the steps outlined in the `Quickstart guide `_. 
Tests ===== From c5193385ed63bad480246290e8d456372e9b6c84 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Fri, 31 Mar 2023 10:03:27 +0100 Subject: [PATCH 134/551] doc: fix warning --- docs/graph_fluent.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/graph_fluent.rst b/docs/graph_fluent.rst index a59117626f..cada908f2f 100644 --- a/docs/graph_fluent.rst +++ b/docs/graph_fluent.rst @@ -90,7 +90,7 @@ to accomplish this configuration: Note that the execution profile created with :meth:`DseGraph.create_execution_profile <.datastax.graph.fluent.DseGraph.create_execution_profile>` cannot be used for any groovy string queries. -If you want to change execution property defaults, please see the :doc:`Execution Profile documentation ` +If you want to change execution property defaults, please see the :doc:`Execution Profile documentation ` for a more generalized discussion of the API. Graph traversal queries use the same execution profile defined for DSE graph. If you need to change the default properties, please refer to the :doc:`DSE Graph query documentation page ` From 4f5fd819e94d44d5e3dca48551fd6ddb61878734 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Fri, 31 Mar 2023 10:48:19 +0100 Subject: [PATCH 135/551] doc: fix warning --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 2007cb5ae7..d28e2593c4 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -534,7 +534,7 @@ def default(self): Key for the default graph execution profile, used when no other profile is selected in ``Session.execute_graph(execution_profile)``. -Use this as the key in :doc:`Cluster(execution_profiles) ` +Use this as the key in :doc:`Cluster(execution_profiles) ` to override the default graph profile. """ From e594869f8ca08446d3764ceb84ff843837270fe6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Thu, 13 Apr 2023 17:38:31 +0200 Subject: [PATCH 136/551] docs/conf.py: Build docs for a new version I forgot to do that during version update in `__init__.py`, so it has to be done now in order to publish new docs. --- docs/conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index d1bbb5ba33..293fef9823 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -11,10 +11,10 @@ # -- General configuration ----------------------------------------------------- # Build documentation for the following tags and branches -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla'] +TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.0-scylla'] BRANCHES = ['master'] # Set the latest version. -LATEST_VERSION = '3.25.11-scylla' +LATEST_VERSION = '3.26.0-scylla' # Set which versions are not released yet. 
UNSTABLE_VERSIONS = ['master']
 # Set which versions are deprecated

From 423f6a6c44adad6eebce8f9366cf3c19e057479c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Karol=20Bary=C5=82a?=
Date: Thu, 13 Apr 2023 17:26:22 +0200
Subject: [PATCH 137/551] merge_next_tag_from_upstream.sh: Use name for upstream that works with SSH remotes

--- scripts/merge_next_tag_from_upstream.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/merge_next_tag_from_upstream.sh b/scripts/merge_next_tag_from_upstream.sh
index 19d999e2cb..25ce3e2ae2 100755
--- a/scripts/merge_next_tag_from_upstream.sh
+++ b/scripts/merge_next_tag_from_upstream.sh
@@ -6,7 +6,7 @@

 # this script assumes remotes for scylladb/python-driver and for datastax/python-driver are configured

-upstream_repo_url=https://github.com/datastax/python-driver
+upstream_repo_url=datastax/python-driver

 upstream_repo=$(git remote -v | grep ${upstream_repo_url} | awk '{print $1}' | head -n1)
 scylla_repo=$(git remote -v | grep scylladb/python-driver | awk '{print $1}' | head -n1)

From eef5b942617563df671674ce77df3e2d9e575c1c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Karol=20Bary=C5=82a?=
Date: Thu, 13 Apr 2023 17:27:39 +0200
Subject: [PATCH 138/551] merge_next_tag_from_upstream.sh: Proper push command

The previous command pushed all tags because of the --tags flag, but didn't push the branch itself. Pushing all tags is fine; not pushing the branch is not, as docs are then not built.

--- scripts/merge_next_tag_from_upstream.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/merge_next_tag_from_upstream.sh b/scripts/merge_next_tag_from_upstream.sh
index 25ce3e2ae2..644f483995 100755
--- a/scripts/merge_next_tag_from_upstream.sh
+++ b/scripts/merge_next_tag_from_upstream.sh
@@ -46,7 +46,7 @@ case "$choice" in
         git merge --continue
         git tag ${new_scyla_tag}
-        git push --tags ${scylla_repo} ${new_scyla_tag}
+        git push --tags ${scylla_repo} master

         re-triggering a build of a tag in Travis:

From 043de4e8c5990541de60040c3baf31064b0cd548 Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Tue, 18 Apr 2023 00:02:27 +0300
Subject: [PATCH 139/551] build-experimental.yml: use ubuntu-latest

By mistake this action was using ubuntu-18.04, which is now deprecated.

--- .github/workflows/build-experimental.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build-experimental.yml b/.github/workflows/build-experimental.yml
index a278c4cf72..43ec5ac701 100644
--- a/.github/workflows/build-experimental.yml
+++ b/.github/workflows/build-experimental.yml
@@ -10,7 +10,7 @@ jobs:
   build_wheels:
     if: contains(github.event.pull_request.labels.*.name, 'test-build-experimental') || github.event_name == 'push' && endsWith(github.event.ref, 'scylla')
     # The host should always be linux
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-latest
     name: Build experimental ${{ matrix.archs }} wheels
     strategy:
       fail-fast: false

From 459b4cdfb00b385ab5c5fbe85e548485e45eec50 Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Tue, 18 Apr 2023 00:13:39 +0300
Subject: [PATCH 140/551] build: update to `cibuildwheel==2.12.1`

Just to be on the latest version and make sure we have all the needed fixes for all platforms.

--- .github/workflows/build-experimental.yml | 2 +-
 .github/workflows/build-push.yml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/build-experimental.yml b/.github/workflows/build-experimental.yml
index 43ec5ac701..4b1bd5c39e 100644
--- a/.github/workflows/build-experimental.yml
+++
b/.github/workflows/build-experimental.yml
@@ -32,7 +32,7 @@ jobs:

       - name: Install cibuildwheel
         run: |
-          python -m pip install cibuildwheel==2.11.2
+          python -m pip install cibuildwheel==2.12.1

       - name: Build wheels
         run: |
diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml
index 55bf95c3d8..1844340e73 100644
--- a/.github/workflows/build-push.yml
+++ b/.github/workflows/build-push.yml
@@ -53,7 +53,7 @@

       - name: Install cibuildwheel
         run: |
-          python -m pip install cibuildwheel==2.11.2
+          python -m pip install cibuildwheel==2.12.1

       - name: Install OpenSSL for Windows
         if: runner.os == 'Windows'

From 5b5933b5cbfa736dc15ef87efa1fb1eb483aa65c Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Tue, 18 Apr 2023 09:47:04 +0300
Subject: [PATCH 141/551] build-experimental.yml: enable building python3.11

Since these builds take much longer, we keep a selective list of supported versions; this adds python3.11 to it. We would consider removing python3.8.

--- .github/workflows/build-experimental.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build-experimental.yml b/.github/workflows/build-experimental.yml
index 4b1bd5c39e..2e9540ebf3 100644
--- a/.github/workflows/build-experimental.yml
+++ b/.github/workflows/build-experimental.yml
@@ -4,7 +4,7 @@ on: [push, pull_request]
 env:
   CIBW_BEFORE_BUILD_LINUX: "rm -rf ~/.pyxbld && yum install -y libev libev-devel openssl openssl-devel"
   CIBW_ENVIRONMENT: "CASS_DRIVER_BUILD_CONCURRENCY=2 CFLAGS='-g0 -O3'"
-  CIBW_BUILD: "cp38* cp39* cp310*"
+  CIBW_BUILD: "cp38* cp39* cp310* cp311*"
   CIBW_SKIP: "*musllinux*"
 jobs:
   build_wheels:

From d7751cba716246bbc0a9789fc789213bda61c8e9 Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Mon, 24 Apr 2023 17:42:30 +0300
Subject: [PATCH 142/551] Revert "Fix wait_for_schema_agreement deadlock"

This reverts commit f5c34f0bda4291dce701596d7816c43da9314a58.

This seems to cause a regression for some scylla-core tests; until it's figured out, we are yanking this fix out.

Ref: #225

--- cassandra/cluster.py | 134 +------------------------
 tests/unit/test_control_connection.py | 6 +-
 2 files changed, 4 insertions(+), 136 deletions(-)

diff --git a/cassandra/cluster.py b/cassandra/cluster.py
index d28e2593c4..31ecd15b6f 100644
--- a/cassandra/cluster.py
+++ b/cassandra/cluster.py
@@ -3783,138 +3783,6 @@ def _refresh_schema(self, connection, preloaded_results=None, schema_agreement_w
         self._cluster.metadata.refresh(connection, self._timeout, fetch_size=self._schema_meta_page_size, **kwargs)

         return True
-
-    # Three functions below (_refresh_schema_async, _refresh_schema_async_inner, _wait_for_schema_agreement_async) are async
-    # versions of the functions without _async in name - instead of blocking and returning result, their first argument
-    # is a callback that will receive either a result or an exception.
-    # Purpose of those functions is to avoid filling whole thread pool and deadlocking.
- def _refresh_schema_async(self, callback, force=False, **kwargs): - def new_callback(e): - if isinstance(e, ReferenceError): - # our weak reference to the Cluster is no good - callback(False) - return - elif isinstance(e, Exception): - log.debug("[control connection] Error refreshing schema", exc_info=True) - self._signal_error() - callback(False) - return - else: - callback(e) - if self._connection: - self._refresh_schema_async_inner(new_callback, self._connection, force=force, **kwargs) - else: - callback(False) - - def _refresh_schema_async_inner(self, callback, connection, preloaded_results=None, schema_agreement_wait=None, force=False, **kwargs): - if self._cluster.is_shutdown: - callback(False) - return - - def new_callback(e): - if not self._schema_meta_enabled and not force: - log.debug("[control connection] Skipping schema refresh because schema metadata is disabled") - callback(False) - return - - if not e: - log.debug("Skipping schema refresh due to lack of schema agreement") - callback(False) - return - self._cluster.metadata.refresh(connection, self._timeout, fetch_size=self._schema_meta_page_size, **kwargs) - - self._wait_for_schema_agreement_async(new_callback, - connection=self._connection, - preloaded_results=preloaded_results, - wait_time=schema_agreement_wait) - - # INTENDED ONLY FOR INTERNAL USE - def _wait_for_schema_agreement_async(self, callback, connection=None, preloaded_results=None, wait_time=None): - total_timeout = wait_time if wait_time is not None else self._cluster.max_schema_agreement_wait - if total_timeout <= 0: - callback(True) - return - - # Each schema change typically generates two schema refreshes, one - # from the response type and one from the pushed notification. Holding - # a lock is just a simple way to cut down on the number of schema queries - # we'll make. 
- if not self._schema_agreement_lock.acquire(blocking=False): - self._cluster.scheduler.schedule_unique(0.2, self._wait_for_schema_agreement_async, callback, connection, preloaded_results, wait_time) - return - - try: - if self._is_shutdown: - self._schema_agreement_lock.release() - callback(None) - return - - if not connection: - connection = self._connection - - if preloaded_results: - log.debug("[control connection] Attempting to use preloaded results for schema agreement") - - peers_result = preloaded_results[0] - local_result = preloaded_results[1] - schema_mismatches = self._get_schema_mismatches(peers_result, local_result, connection.endpoint) - if schema_mismatches is None: - self._schema_agreement_lock.release() - callback(True) - return - - log.debug("[control connection] Waiting for schema agreement") - start = self._time.time() - elapsed = 0 - cl = ConsistencyLevel.ONE - schema_mismatches = None - select_peers_query = self._get_peers_query(self.PeersQueryType.PEERS_SCHEMA, connection) - except Exception as e: - self._schema_agreement_lock.release() - callback(e) - return - - def inner(first_iter): - try: - elapsed = self._time.time() - start - if elapsed < total_timeout or first_iter: - peers_query = QueryMessage(query=select_peers_query, consistency_level=cl) - local_query = QueryMessage(query=self._SELECT_SCHEMA_LOCAL, consistency_level=cl) - try: - timeout = min(self._timeout, total_timeout - elapsed) - peers_result, local_result = connection.wait_for_responses( - peers_query, local_query, timeout=timeout) - except OperationTimedOut as timeout: - log.debug("[control connection] Timed out waiting for " - "response during schema agreement check: %s", timeout) - self._cluster.scheduler.schedule_unique(0.2, inner, False) - return - except ConnectionShutdown as e: - if self._is_shutdown: - log.debug("[control connection] Aborting wait for schema match due to shutdown") - self._schema_agreement_lock.release() - callback(None) - return - else: - raise - - schema_mismatches = self._get_schema_mismatches(peers_result, local_result, connection.endpoint) - if schema_mismatches is None: - self._schema_agreement_lock.release() - callback(True) - return - - log.debug("[control connection] Schemas mismatched, trying again") - self._cluster.scheduler.schedule_unique(0.2, inner, False) - else: - log.warning("Node %s is reporting a schema disagreement: %s", - connection.endpoint, schema_mismatches) - self._schema_agreement_lock.release() - callback(False) - except Exception as e: - self._schema_agreement_lock.release() - callback(e) - inner(True) def refresh_node_list_and_token_map(self, force_token_rebuild=False): try: @@ -4171,7 +4039,7 @@ def _handle_schema_change(self, event): if self._schema_event_refresh_window < 0: return delay = self._delay_for_event_type('schema_change', self._schema_event_refresh_window) - self._cluster.scheduler.schedule_unique(delay, self._refresh_schema_async, lambda *a, **k: None, **event) + self._cluster.scheduler.schedule_unique(delay, self.refresh_schema, **event) def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wait_time=None): diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index 99143183a6..51ea297724 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -512,13 +512,13 @@ def test_handle_schema_change(self): } self.cluster.scheduler.reset_mock() self.control_connection._handle_schema_change(event) - 
self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection._refresh_schema_async, ANY, **event) + self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_schema, **event) self.cluster.scheduler.reset_mock() event['target_type'] = SchemaTargetType.KEYSPACE del event['table'] self.control_connection._handle_schema_change(event) - self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection._refresh_schema_async, ANY, **event) + self.cluster.scheduler.schedule_unique.assert_called_once_with(ANY, self.control_connection.refresh_schema, **event) def test_refresh_disabled(self): cluster = MockCluster() @@ -566,7 +566,7 @@ def test_refresh_disabled(self): cc_no_topo_refresh._handle_status_change(status_event) cc_no_topo_refresh._handle_schema_change(schema_event) cluster.scheduler.schedule_unique.assert_has_calls([call(ANY, cc_no_topo_refresh.refresh_node_list_and_token_map), - call(0.0, cc_no_topo_refresh._refresh_schema_async, ANY, + call(0.0, cc_no_topo_refresh.refresh_schema, **schema_event)]) def test_refresh_nodes_and_tokens_add_host_detects_port(self): From b9f295e7a93febe8e02240e6070fbc389ecdc4b8 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Mon, 24 Apr 2023 17:44:33 +0300 Subject: [PATCH 143/551] Release 3.26.1 --- cassandra/__init__.py | 2 +- docs/conf.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index e14f20c6ed..84b459fc98 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 26, 0) +__version_info__ = (3, 26, 1) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/conf.py b/docs/conf.py index 293fef9823..9584c7556a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -11,10 +11,10 @@ # -- General configuration ----------------------------------------------------- # Build documentation for the following tags and branches -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.0-scylla'] +TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.1-scylla'] BRANCHES = ['master'] # Set the latest version. -LATEST_VERSION = '3.26.0-scylla' +LATEST_VERSION = '3.26.1-scylla' # Set which versions are not released yet. 
 UNSTABLE_VERSIONS = ['master']

 # Set which versions are deprecated

From 00329a3b34cc43d357afffaab52e80fa316119b7 Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Sun, 7 May 2023 11:16:57 +0300
Subject: [PATCH 144/551] integration-tests: fix undefined `logger` variable

e6abdf125123e0a8d6b5611efc2a6722faadc6b3 seems to have broken some of the
test teardown while trying to clean up log handlers, failing like the
following:

```
self =

    def tearDown(self):
>       logger.removeHandler(self.mock_handler)
E       NameError: name 'logger' is not defined

tests/integration/standard/test_query.py:513: NameError
```

---
 tests/integration/standard/test_query.py | 6 +++---
 tests/integration/upgrade/__init__.py    | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py
index 7eb4cd39c7..cd402fdc96 100644
--- a/tests/integration/standard/test_query.py
+++ b/tests/integration/standard/test_query.py
@@ -506,11 +506,11 @@ class PreparedStatementArgTest(unittest.TestCase):
 
     def setUp(self):
         self.mock_handler = MockLoggingHandler()
-        logger = logging.getLogger(cluster.__name__)
-        logger.addHandler(self.mock_handler)
+        self.logger = logging.getLogger(cluster.__name__)
+        self.logger.addHandler(self.mock_handler)
 
     def tearDown(self):
-        logger.removeHandler(self.mock_handler)
+        self.logger.removeHandler(self.mock_handler)
 
     def test_prepare_on_all_hosts(self):
         """
diff --git a/tests/integration/upgrade/__init__.py b/tests/integration/upgrade/__init__.py
index a906f60566..c5c06c4b01 100644
--- a/tests/integration/upgrade/__init__.py
+++ b/tests/integration/upgrade/__init__.py
@@ -76,12 +76,12 @@ class UpgradeBase(unittest.TestCase):
     @classmethod
     def setUpClass(cls):
         cls.logger_handler = MockLoggingHandler()
-        logger = logging.getLogger(cluster.__name__)
-        logger.addHandler(cls.logger_handler)
+        cls.logger = logging.getLogger(cluster.__name__)
+        cls.logger.addHandler(cls.logger_handler)
 
     @classmethod
     def tearDownClass(cls):
-        logger.removeHandler(cls.logger_handler)
+        cls.logger.removeHandler(cls.logger_handler)
 
     def _upgrade_step_setup(self):
         """

From 6eaceb068f04cae1939144658ac8ad269c38ee90 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Th=C3=A9o=20Mathieu?=
Date: Fri, 21 Apr 2023 08:44:58 +0200
Subject: [PATCH 145/551] Allow extra field when inserting with prepared
 queries

Let's say you have a class mapped to a user type:

```
class Address(object):
    def __init__(self, street, zipcode, **kwargs):
        self.street = street
        self.zipcode = zipcode

cluster.register_user_type('mykeyspace', 'address', Address)
```

And let's say the type actually contains another field; let's call it
`raw_address`. Then inserting data through a prepared statement will
actually fail: the driver will complain `raw_address` is missing.

This change addresses that, as any field should be optional.
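To make the failure mode concrete, here is a minimal sketch of the
scenario (the `users` table, its columns and the schema in the comment
are hypothetical, added only for illustration):

```
from cassandra.cluster import Cluster

# Assumed schema, for illustration only:
#   CREATE TYPE mykeyspace.address (street text, zipcode int, raw_address text);
#   CREATE TABLE mykeyspace.users (id int PRIMARY KEY, addr frozen<address>);

class Address(object):
    # Deliberately omits the UDT's raw_address field.
    def __init__(self, street, zipcode, **kwargs):
        self.street = street
        self.zipcode = zipcode

cluster = Cluster()
session = cluster.connect('mykeyspace')
cluster.register_user_type('mykeyspace', 'address', Address)

insert = session.prepare("INSERT INTO users (id, addr) VALUES (?, ?)")
# Before this change serialization raised AttributeError, because the
# driver used getattr(val, 'raw_address') with no default; with the
# change, the missing field is simply serialized as NULL.
session.execute(insert, (1, Address('Main St', 12345)))
```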
---
 cassandra/cqltypes.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py
index 7946a63af8..88a2b5fd4b 100644
--- a/cassandra/cqltypes.py
+++ b/cassandra/cqltypes.py
@@ -1026,7 +1026,7 @@ def serialize_safe(cls, val, protocol_version):
             try:
                 item = val[i]
             except TypeError:
-                item = getattr(val, fieldname)
+                item = getattr(val, fieldname, None)
 
             if item is not None:
                 packed_item = subtype.to_binary(item, proto_version)

From bc3a862866e3e568a38fe78b617f676d9c198626 Mon Sep 17 00:00:00 2001
From: Theo Mathieu
Date: Wed, 26 Apr 2023 14:03:04 +0200
Subject: [PATCH 146/551] feat: wip

---
 cassandra/cqltypes.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py
index 88a2b5fd4b..8167b3b894 100644
--- a/cassandra/cqltypes.py
+++ b/cassandra/cqltypes.py
@@ -1027,6 +1027,8 @@ def serialize_safe(cls, val, protocol_version):
                 item = val[i]
             except TypeError:
                 item = getattr(val, fieldname, None)
+                if item is None and not hasattr(val, fieldname):
+                    log.warning(f"field {fieldname} is part of the UDT {cls.typename} but is not present in the value {val}")
 
             if item is not None:
                 packed_item = subtype.to_binary(item, proto_version)

From bc327266a0416a3ad95c26ac54b2a42be9e2fe15 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Karol=20Bary=C5=82a?=
Date: Tue, 28 Mar 2023 16:23:52 +0200
Subject: [PATCH 147/551] Use `pip install -e .`

Without `-e`, the driver is installed into site-packages, and the
installed version then conflicts with the source. This has caused me
problems many times during development, because the source version
doesn't work (native libraries are not compiled). `-e` causes the driver
to be installed in-place, so there are no more conflicts and import
issues.

---
 ci/run_integration_test.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ci/run_integration_test.sh b/ci/run_integration_test.sh
index 72fa1901b0..0b34e57772 100755
--- a/ci/run_integration_test.sh
+++ b/ci/run_integration_test.sh
@@ -23,7 +23,7 @@ pip install -U pip wheel setuptools
 
 # install driver wheel
 pip install --ignore-installed -r test-requirements.txt pytest
-pip install .
+pip install -e .
 
 # download awscli
 pip install awscli

From 392057ef01aa72134c11f8347e1b30246b18b958 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Karol=20Bary=C5=82a?=
Date: Mon, 27 Mar 2023 18:03:54 +0200
Subject: [PATCH 148/551] pytest.ini: Enable strict xfail

Tests marked as xfailing should be failing - if they are not, it means
something in Scylla changed and we need to adapt the tests. This option
means that an unexpectedly passing xfail-marked test will cause a
failure.

---
 pytest.ini | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pytest.ini b/pytest.ini
index 70ce703622..0846273427 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -1,4 +1,5 @@
 [pytest]
 log_format = %(asctime)s.%(msecs)03d %(levelname)s [%(module)s:%(lineno)s]: %(message)s
 log_level = DEBUG
-log_date_format = %Y-%m-%d %H:%M:%S
\ No newline at end of file
+log_date_format = %Y-%m-%d %H:%M:%S
+xfail_strict=true

From 2c00eebceaa24f7c9b819b14042a08fc2434c637 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Karol=20Bary=C5=82a?=
Date: Fri, 10 Mar 2023 17:08:46 +0100
Subject: [PATCH 149/551] tests/integration: Rename some skip/xfail decorators

Renamed some decorators (those that I previously added) from lowercase to
lowercase with underscores (as advised by PEP 8 for functions - which a
decorator is). I changed the newly added decorators but didn't yet touch
older ones - still need to decide whether to do it.
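As background for these marker commits, a minimal sketch of what the
strict-xfail setting enabled above changes in practice (the test below is
hypothetical):

```
import pytest

# With xfail_strict = true in pytest.ini, an xfail-marked test that
# unexpectedly passes is reported as FAILED instead of XPASS, so the
# suite flags it and the stale marker can be removed.
@pytest.mark.xfail(reason='Scylla does not support this yet')
def test_not_yet_supported():
    assert 1 + 1 == 3  # stand-in for a feature Scylla lacks
```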
--- tests/integration/__init__.py | 4 +-- .../cqlengine/management/test_management.py | 4 +-- .../integration/cqlengine/query/test_named.py | 4 +-- .../cqlengine/query/test_queryset.py | 26 +++++++++---------- .../statements/test_base_statement.py | 4 +-- 5 files changed, 21 insertions(+), 21 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 7530e87451..8ce1e0a4b3 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -366,8 +366,8 @@ def _id_and_mark(f): lessthandse51 = unittest.skipUnless(DSE_VERSION and DSE_VERSION < Version('5.1'), "DSE version less than 5.1 required") lessthandse60 = unittest.skipUnless(DSE_VERSION and DSE_VERSION < Version('6.0'), "DSE version less than 6.0 required") -requirescollectionindexes = unittest.skipUnless(SCYLLA_VERSION is None or Version(SCYLLA_VERSION.split(':')[1]) >= Version('5.2'), 'Test requires Scylla >= 5.2 or Cassandra') -requirescustomindexes = unittest.skipUnless(SCYLLA_VERSION is None, 'Currently, Scylla does not support SASI or any other CUSTOM INDEX class.') +requires_collection_indexes = unittest.skipUnless(SCYLLA_VERSION is None or Version(SCYLLA_VERSION.split(':')[1]) >= Version('5.2'), 'Test requires Scylla >= 5.2 or Cassandra') +requires_custom_indexes = unittest.skipUnless(SCYLLA_VERSION is None, 'Currently, Scylla does not support SASI or any other CUSTOM INDEX class.') pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") notpy3 = unittest.skipIf(sys.version_info >= (3, 0), "Test not applicable for Python 3.x runtime") diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index fd6c7c4f09..a758a89f0a 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -24,7 +24,7 @@ from cassandra.cqlengine.models import Model from cassandra.cqlengine import columns -from tests.integration import DSE_VERSION, PROTOCOL_VERSION, greaterthancass20, requirescollectionindexes, MockLoggingHandler, CASSANDRA_VERSION +from tests.integration import DSE_VERSION, PROTOCOL_VERSION, greaterthancass20, requires_collection_indexes, MockLoggingHandler, CASSANDRA_VERSION from tests.integration.cqlengine.base import BaseCassEngTestCase from tests.integration.cqlengine.query.test_queryset import TestModel from cassandra.cqlengine.usertype import UserType @@ -427,7 +427,7 @@ def test_sync_index_case_sensitive(self): self.assertIsNotNone(management._get_index_name_by_column(table_meta, 'second_key')) @greaterthancass20 - @requirescollectionindexes + @requires_collection_indexes def test_sync_indexed_set(self): """ Tests that models that have container types with indices can be synced. 
diff --git a/tests/integration/cqlengine/query/test_named.py b/tests/integration/cqlengine/query/test_named.py index 9dee3055cd..0d5ba38200 100644 --- a/tests/integration/cqlengine/query/test_named.py +++ b/tests/integration/cqlengine/query/test_named.py @@ -27,7 +27,7 @@ from tests.integration.cqlengine.query.test_queryset import BaseQuerySetUsage -from tests.integration import BasicSharedKeyspaceUnitTestCase, greaterthanorequalcass30, requirescollectionindexes +from tests.integration import BasicSharedKeyspaceUnitTestCase, greaterthanorequalcass30, requires_collection_indexes class TestQuerySetOperation(BaseCassEngTestCase): @@ -118,7 +118,7 @@ def test_query_expression_where_clause_generation(self): self.assertIsInstance(where.operator, GreaterThanOrEqualOperator) self.assertEqual(where.value, 1) -@requirescollectionindexes +@requires_collection_indexes class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage): @classmethod diff --git a/tests/integration/cqlengine/query/test_queryset.py b/tests/integration/cqlengine/query/test_queryset.py index 4901f011f5..a2f9f23d48 100644 --- a/tests/integration/cqlengine/query/test_queryset.py +++ b/tests/integration/cqlengine/query/test_queryset.py @@ -39,7 +39,7 @@ from cassandra.util import uuid_from_time from cassandra.cqlengine.connection import get_session from tests.integration import PROTOCOL_VERSION, CASSANDRA_VERSION, greaterthancass20, greaterthancass21, \ - greaterthanorequalcass30, TestCluster, requirescollectionindexes + greaterthanorequalcass30, TestCluster, requires_collection_indexes from tests.integration.cqlengine import execute_count, DEFAULT_KEYSPACE @@ -384,7 +384,7 @@ def tearDownClass(cls): drop_table(CustomIndexedTestModel) drop_table(TestMultiClusteringModel) -@requirescollectionindexes +@requires_collection_indexes class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage): @execute_count(2) @@ -558,7 +558,7 @@ class NonEqualityFilteringModel(Model): num = qa.count() assert num == 1, num -@requirescollectionindexes +@requires_collection_indexes class TestQuerySetDistinct(BaseQuerySetUsage): @execute_count(1) @@ -597,7 +597,7 @@ def test_distinct_with_explicit_count(self): self.assertEqual(q.count(), 2) -@requirescollectionindexes +@requires_collection_indexes class TestQuerySetOrdering(BaseQuerySetUsage): @execute_count(2) def test_order_by_success_case(self): @@ -646,7 +646,7 @@ def test_ordering_on_multiple_clustering_columns(self): assert [r.three for r in results] == [1, 2, 3, 4, 5] -@requirescollectionindexes +@requires_collection_indexes class TestQuerySetSlicing(BaseQuerySetUsage): @execute_count(1) @@ -701,7 +701,7 @@ def test_negative_slicing(self): self.assertEqual(model.attempt_id, expect) -@requirescollectionindexes +@requires_collection_indexes class TestQuerySetValidation(BaseQuerySetUsage): def test_primary_key_or_index_must_be_specified(self): @@ -783,7 +783,7 @@ def test_custom_indexed_field_can_be_queried(self): list(CustomIndexedTestModel.objects.filter(test_id=1, description='test')) -@requirescollectionindexes +@requires_collection_indexes class TestQuerySetDelete(BaseQuerySetUsage): @execute_count(9) @@ -942,7 +942,7 @@ def test_success_case(self): assert '4' in datas -@requirescollectionindexes +@requires_collection_indexes class TestInOperator(BaseQuerySetUsage): @execute_count(1) def test_kwarg_success_case(self): @@ -1003,7 +1003,7 @@ class bool_model2(Model): @greaterthancass20 -@requirescollectionindexes +@requires_collection_indexes class TestContainsOperator(BaseQuerySetUsage): 
     @execute_count(6)
@@ -1069,7 +1069,7 @@ def test_query_expression_success_case(self):
         self.assertEqual(q.count(), 0)
 
 
-@requirescollectionindexes
+@requires_collection_indexes
 class TestValuesList(BaseQuerySetUsage):
 
     @execute_count(2)
@@ -1082,7 +1082,7 @@ def test_values_list(self):
         assert item == 10
 
 
-@requirescollectionindexes
+@requires_collection_indexes
 class TestObjectsProperty(BaseQuerySetUsage):
     @execute_count(1)
     def test_objects_property_returns_fresh_queryset(self):
@@ -1113,7 +1113,7 @@ class PagingTest(Model):
         assert len(results) == 2
 
 
-@requirescollectionindexes
+@requires_collection_indexes
 class ModelQuerySetTimeoutTestCase(BaseQuerySetUsage):
     def test_default_timeout(self):
         with mock.patch.object(Session, 'execute') as mock_execute:
@@ -1131,7 +1131,7 @@ def test_none_timeout(self):
         self.assertEqual(mock_execute.call_args[-1]['timeout'], None)
 
 
-@requirescollectionindexes
+@requires_collection_indexes
 class DMLQueryTimeoutTestCase(BaseQuerySetUsage):
     def setUp(self):
         self.model = TestModel(test_id=1, attempt_id=1, description='timeout test')
diff --git a/tests/integration/cqlengine/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py
index 0b48096f61..25ed0c9cb4 100644
--- a/tests/integration/cqlengine/statements/test_base_statement.py
+++ b/tests/integration/cqlengine/statements/test_base_statement.py
@@ -26,7 +26,7 @@
 from tests.integration.cqlengine.base import BaseCassEngTestCase, TestQueryUpdateModel
 from tests.integration.cqlengine import DEFAULT_KEYSPACE
-from tests.integration import greaterthanorequalcass3_10, requirescustomindexes, TestCluster
+from tests.integration import greaterthanorequalcass3_10, requires_custom_indexes, TestCluster
 
 from cassandra.cqlengine.connection import execute
 
@@ -102,7 +102,7 @@ def test_insert_statement_execute(self):
         self.assertEqual(TestQueryUpdateModel.objects.count(), 0)
 
     @greaterthanorequalcass3_10
-    @requirescustomindexes
+    @requires_custom_indexes
     def test_like_operator(self):
         """
         Test to verify the like operator works appropriately

From 92abab27d2956fa751cae4e64b3c32255835762d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Karol=20Bary=C5=82a?=
Date: Sat, 11 Mar 2023 00:13:44 +0100
Subject: [PATCH 150/551] tests/integration: Switch new decorators to
 pytest.mark.xfail

It has some advantages, explained in the comment added next to them. For
completeness, I'm copying this comment here:

# pytest.mark.xfail instead of unittest.expectedFailure because
# 1. unittest doesn't skip setUpClass when used on class and we need it sometimes
# 2. unittest doesn't have conditional xfail, and I prefer to use pytest than custom decorator
# 3. unittest doesn't have a reason argument, so you don't see the reason in pytest report

In the future all decorators should probably be switched over.
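A minimal sketch of the resulting pattern (the environment lookup is
simplified here; the real definitions read SCYLLA_VERSION as shown in the
diff below):

```
import unittest
import pytest

SCYLLA_VERSION = None  # stand-in for the real environment lookup

# Conditional xfail with a reason; unlike unittest.expectedFailure, the
# marker takes a condition, and the reason string shows up in the pytest
# report.
requires_custom_indexes = pytest.mark.xfail(
    SCYLLA_VERSION is not None,
    reason='Scylla does not support SASI or any other CUSTOM INDEX class')

@requires_custom_indexes
class CustomIndexTests(unittest.TestCase):
    def test_sasi_index(self):
        self.assertTrue(True)  # placeholder body
```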
--- tests/integration/__init__.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 8ce1e0a4b3..49458baf9f 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -34,6 +34,7 @@ from itertools import groupby import six import shutil +import pytest from cassandra import OperationTimedOut, ReadTimeout, ReadFailure, WriteTimeout, WriteFailure, AlreadyExists,\ @@ -366,8 +367,14 @@ def _id_and_mark(f): lessthandse51 = unittest.skipUnless(DSE_VERSION and DSE_VERSION < Version('5.1'), "DSE version less than 5.1 required") lessthandse60 = unittest.skipUnless(DSE_VERSION and DSE_VERSION < Version('6.0'), "DSE version less than 6.0 required") -requires_collection_indexes = unittest.skipUnless(SCYLLA_VERSION is None or Version(SCYLLA_VERSION.split(':')[1]) >= Version('5.2'), 'Test requires Scylla >= 5.2 or Cassandra') -requires_custom_indexes = unittest.skipUnless(SCYLLA_VERSION is None, 'Currently, Scylla does not support SASI or any other CUSTOM INDEX class.') +# pytest.mark.xfail instead of unittest.expectedFailure because +# 1. unittest doesn't skip setUpClass when used on class and we need it sometimes +# 2. unittest doesn't have conditional xfail, and I prefer to use pytest than custom decorator +# 3. unittest doesn't have a reason argument, so you don't see the reason in pytest report +requires_collection_indexes = pytest.mark.xfail(SCYLLA_VERSION is not None and Version(SCYLLA_VERSION.split(':')[1]) < Version('5.2'), + reason='Scylla supports collection indexes from 5.2 onwards') +requires_custom_indexes = pytest.mark.xfail(SCYLLA_VERSION is not None, + reason='Scylla does not support SASI or any other CUSTOM INDEX class') pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") notpy3 = unittest.skipIf(sys.version_info >= (3, 0), "Test not applicable for Python 3.x runtime") From 5122080a9b69d122291085be1c086187628c1180 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Fri, 10 Mar 2023 17:16:47 +0100 Subject: [PATCH 151/551] test_query.py: Fix usage of MockLoggingHandler Previous code was incorrect and couldn't possibly work. --- tests/integration/standard/test_query.py | 136 +++++++++++------------ 1 file changed, 65 insertions(+), 71 deletions(-) diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index cd402fdc96..801ee0fd7c 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -503,15 +503,6 @@ def test_prepared_metadata_generation(self): class PreparedStatementArgTest(unittest.TestCase): - - def setUp(self): - self.mock_handler = MockLoggingHandler() - self.logger = logging.getLogger(cluster.__name__) - self.logger.addHandler(self.mock_handler) - - def tearDown(self): - self.logger.removeHandler(self.mock_handler) - def test_prepare_on_all_hosts(self): """ Test to validate prepare_on_all_hosts flag is honored. 
@@ -523,14 +514,15 @@ def test_prepare_on_all_hosts(self): @jira_ticket PYTHON-556 @expected_result queries will have to re-prepared on hosts that aren't the control connection """ - clus = TestCluster(prepare_on_all_hosts=False, reprepare_on_up=False) - self.addCleanup(clus.shutdown) + with MockLoggingHandler().set_module_name(cluster.__name__) as mock_handler: + clus = TestCluster(prepare_on_all_hosts=False, reprepare_on_up=False) + self.addCleanup(clus.shutdown) - session = clus.connect(wait_for_all_pools=True) - select_statement = session.prepare("SELECT k FROM test3rf.test WHERE k = ?") - for host in clus.metadata.all_hosts(): - session.execute(select_statement, (1, ), host=host) - self.assertEqual(2, self.mock_handler.get_message_count('debug', "Re-preparing")) + session = clus.connect(wait_for_all_pools=True) + select_statement = session.prepare("SELECT k FROM test3rf.test WHERE k = ?") + for host in clus.metadata.all_hosts(): + session.execute(select_statement, (1, ), host=host) + self.assertEqual(2, mock_handler.get_message_count('debug', "Re-preparing")) def test_prepare_batch_statement(self): """ @@ -542,39 +534,40 @@ def test_prepare_batch_statement(self): @expected_result queries will have to re-prepared on hosts that aren't the control connection and the batch statement will be sent. """ - policy = ForcedHostIndexPolicy() - clus = TestCluster( - execution_profiles={ - EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=policy), - }, - prepare_on_all_hosts=False, - reprepare_on_up=False, - ) - self.addCleanup(clus.shutdown) + with MockLoggingHandler().set_module_name(cluster.__name__) as mock_handler: + policy = ForcedHostIndexPolicy() + clus = TestCluster( + execution_profiles={ + EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=policy), + }, + prepare_on_all_hosts=False, + reprepare_on_up=False, + ) + self.addCleanup(clus.shutdown) - table = "test3rf.%s" % self._testMethodName.lower() + table = "test3rf.%s" % self._testMethodName.lower() - session = clus.connect(wait_for_all_pools=True) + session = clus.connect(wait_for_all_pools=True) - session.execute("DROP TABLE IF EXISTS %s" % table) - session.execute("CREATE TABLE %s (k int PRIMARY KEY, v int )" % table) + session.execute("DROP TABLE IF EXISTS %s" % table) + session.execute("CREATE TABLE %s (k int PRIMARY KEY, v int )" % table) - insert_statement = session.prepare("INSERT INTO %s (k, v) VALUES (?, ?)" % table) + insert_statement = session.prepare("INSERT INTO %s (k, v) VALUES (?, ?)" % table) - # This is going to query a host where the query - # is not prepared - policy.set_host(1) - batch_statement = BatchStatement(consistency_level=ConsistencyLevel.ONE) - batch_statement.add(insert_statement, (1, 2)) - session.execute(batch_statement) + # This is going to query a host where the query + # is not prepared + policy.set_host(1) + batch_statement = BatchStatement(consistency_level=ConsistencyLevel.ONE) + batch_statement.add(insert_statement, (1, 2)) + session.execute(batch_statement) - # To verify our test assumption that queries are getting re-prepared properly - self.assertEqual(1, self.mock_handler.get_message_count('debug', "Re-preparing")) + # To verify our test assumption that queries are getting re-prepared properly + self.assertEqual(1, mock_handler.get_message_count('debug', "Re-preparing")) - select_results = session.execute(SimpleStatement("SELECT * FROM %s WHERE k = 1" % table, - consistency_level=ConsistencyLevel.ALL)) - first_row = select_results[0][:2] - self.assertEqual((1, 2), 
first_row) + select_results = session.execute(SimpleStatement("SELECT * FROM %s WHERE k = 1" % table, + consistency_level=ConsistencyLevel.ALL)) + first_row = select_results[0][:2] + self.assertEqual((1, 2), first_row) def test_prepare_batch_statement_after_alter(self): """ @@ -587,44 +580,45 @@ def test_prepare_batch_statement_after_alter(self): @expected_result queries will have to re-prepared on hosts that aren't the control connection and the batch statement will be sent. """ - clus = TestCluster(prepare_on_all_hosts=False, reprepare_on_up=False) - self.addCleanup(clus.shutdown) + with MockLoggingHandler().set_module_name(cluster.__name__) as mock_handler: + clus = TestCluster(prepare_on_all_hosts=False, reprepare_on_up=False) + self.addCleanup(clus.shutdown) - table = "test3rf.%s" % self._testMethodName.lower() + table = "test3rf.%s" % self._testMethodName.lower() - session = clus.connect(wait_for_all_pools=True) + session = clus.connect(wait_for_all_pools=True) - session.execute("DROP TABLE IF EXISTS %s" % table) - session.execute("CREATE TABLE %s (k int PRIMARY KEY, a int, b int, d int)" % table) - insert_statement = session.prepare("INSERT INTO %s (k, b, d) VALUES (?, ?, ?)" % table) + session.execute("DROP TABLE IF EXISTS %s" % table) + session.execute("CREATE TABLE %s (k int PRIMARY KEY, a int, b int, d int)" % table) + insert_statement = session.prepare("INSERT INTO %s (k, b, d) VALUES (?, ?, ?)" % table) - # Altering the table might trigger an update in the insert metadata - session.execute("ALTER TABLE %s ADD c int" % table) + # Altering the table might trigger an update in the insert metadata + session.execute("ALTER TABLE %s ADD c int" % table) - values_to_insert = [(1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6)] + values_to_insert = [(1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6)] - # We query the three hosts in order (due to the ForcedHostIndexPolicy) - # the first three queries will have to be repreapred and the rest should - # work as normal batch prepared statements - hosts = clus.metadata.all_hosts() - for i in range(10): - value_to_insert = values_to_insert[i % len(values_to_insert)] - batch_statement = BatchStatement(consistency_level=ConsistencyLevel.ONE) - batch_statement.add(insert_statement, value_to_insert) - session.execute(batch_statement, host=hosts[i % len(hosts)]) + # We query the three hosts in order (due to the ForcedHostIndexPolicy) + # the first three queries will have to be repreapred and the rest should + # work as normal batch prepared statements + hosts = clus.metadata.all_hosts() + for i in range(10): + value_to_insert = values_to_insert[i % len(values_to_insert)] + batch_statement = BatchStatement(consistency_level=ConsistencyLevel.ONE) + batch_statement.add(insert_statement, value_to_insert) + session.execute(batch_statement, host=hosts[i % len(hosts)]) - select_results = session.execute("SELECT * FROM %s" % table) - expected_results = [ - (1, None, 2, None, 3), - (2, None, 3, None, 4), - (3, None, 4, None, 5), - (4, None, 5, None, 6) - ] + select_results = session.execute("SELECT * FROM %s" % table) + expected_results = [ + (1, None, 2, None, 3), + (2, None, 3, None, 4), + (3, None, 4, None, 5), + (4, None, 5, None, 6) + ] - self.assertEqual(set(expected_results), set(select_results._current_rows)) + self.assertEqual(set(expected_results), set(select_results._current_rows)) - # To verify our test assumption that queries are getting re-prepared properly - self.assertEqual(3, self.mock_handler.get_message_count('debug', "Re-preparing")) + # To verify our 
test assumption that queries are getting re-prepared properly
+            self.assertEqual(3, mock_handler.get_message_count('debug', "Re-preparing"))
 
 
 class PrintStatementTests(unittest.TestCase):

From 80604899c87337d40dcf7a9a2f5769060f829773 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Karol=20Bary=C5=82a?=
Date: Fri, 10 Mar 2023 23:56:32 +0100
Subject: [PATCH 152/551] test_custom_cluster.py: Increase startup timeout

Scylla needs more time to start, which caused the test to fail.
Increasing to 60 seconds was not enough; 120 seems to work.

---
 tests/integration/standard/test_custom_cluster.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/integration/standard/test_custom_cluster.py b/tests/integration/standard/test_custom_cluster.py
index d0f10d51db..6cdfb8d1c3 100644
--- a/tests/integration/standard/test_custom_cluster.py
+++ b/tests/integration/standard/test_custom_cluster.py
@@ -30,7 +30,7 @@ def setup_module():
     # wait until all nodes are up
     wait_until_not_raised(lambda: TestCluster(contact_points=['127.0.0.1'], port=9046).connect().shutdown(), 1, 20)
     wait_until_not_raised(lambda: TestCluster(contact_points=['127.0.0.2'], port=9046).connect().shutdown(), 1, 20)
-    wait_until_not_raised(lambda: TestCluster(contact_points=['127.0.0.3'], port=9046).connect().shutdown(), 1, 20)
+    wait_until_not_raised(lambda: TestCluster(contact_points=['127.0.0.3'], port=9046).connect().shutdown(), 1, 120)
 
 
 def teardown_module():

From 45c4f50cfdb606ede4193478545489833444b01e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Karol=20Bary=C5=82a?=
Date: Sat, 11 Mar 2023 00:01:18 +0100
Subject: [PATCH 153/551] Reenable test_authentication_misconfiguration.py

This test was previously disabled because the required functionality was
not implemented in CCM. The test currently passes - and I manually
inspected the logs to make sure node3 really has authentication enabled.

---
 .../standard/test_authentication_misconfiguration.py | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/tests/integration/standard/test_authentication_misconfiguration.py b/tests/integration/standard/test_authentication_misconfiguration.py
index bb67c987cc..546141d801 100644
--- a/tests/integration/standard/test_authentication_misconfiguration.py
+++ b/tests/integration/standard/test_authentication_misconfiguration.py
@@ -19,12 +19,6 @@ class MisconfiguredAuthenticationTests(unittest.TestCase):
     """ One node (not the contact point) has password auth. The rest of the nodes have no auth """
-    # TODO: Fix ccm to apply following options to scylla.yaml
-    #  node3.set_configuration_options(values={
-    #      'authenticator': 'PasswordAuthenticator',
-    #      'authorizer': 'CassandraAuthorizer',
-    #  })
-    #  To make it working for scylla
     @classmethod
     def setUpClass(cls):
         if not USE_CASS_EXTERNAL:
@@ -38,7 +32,6 @@ def setUpClass(cls):
 
         cls.ccm_cluster = ccm_cluster
 
-    @unittest.expectedFailure
     def test_connect_no_auth_provider(self):
         cluster = TestCluster()
         cluster.connect()

From def770c10cb468b7f2476114413a80b40a7c6640 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Karol=20Bary=C5=82a?=
Date: Sat, 11 Mar 2023 00:22:04 +0100
Subject: [PATCH 154/551] Integration test: clearer skip/xfail labels

Many tests failing on Scylla were labeled with unconditional xfail. This
has some problems:
- It's hard to tell why a test was marked.
- When some functionality is implemented in Scylla, we don't have an easy
  way to reenable tests that use this functionality.
- Tests are skipped even when testing with Cassandra.

This commit introduces more labels for failing tests.
It fixes these problems:
- The label name and reason string explain why a test is disabled.
- We can edit the label definition to enable tests on newer Scylla
  versions.
- Tests are only skipped in environments where they are expected to fail.

---
 tests/integration/__init__.py                 |  9 +++++
 .../standard/test_client_warnings.py          |  7 ++--
 tests/integration/standard/test_cluster.py    | 13 +++-----
 .../standard/test_custom_payload.py           | 11 ++-----
 tests/integration/standard/test_metadata.py   | 33 +++++++++----------
 tests/integration/standard/test_query.py      |  5 ++-
 tests/integration/standard/test_types.py      |  4 +--
 7 files changed, 41 insertions(+), 41 deletions(-)

diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
index 49458baf9f..9a40a62d59 100644
--- a/tests/integration/__init__.py
+++ b/tests/integration/__init__.py
@@ -344,6 +344,7 @@ def _id_and_mark(f):
 local = local_decorator_creator()
 notprotocolv1 = unittest.skipUnless(PROTOCOL_VERSION > 1, 'Protocol v1 not supported')
 lessthenprotocolv4 = unittest.skipUnless(PROTOCOL_VERSION < 4, 'Protocol versions 4 or greater not supported')
+lessthanprotocolv3 = unittest.skipUnless(PROTOCOL_VERSION < 3, 'Protocol versions 3 or greater not supported')
 greaterthanprotocolv3 = unittest.skipUnless(PROTOCOL_VERSION >= 4, 'Protocol versions less than 4 are not supported')
 protocolv6 = unittest.skipUnless(6 in get_supported_protocol_versions(), 'Protocol versions less than 6 are not supported')
 
@@ -375,6 +376,14 @@ def _id_and_mark(f):
                                              reason='Scylla supports collection indexes from 5.2 onwards')
 requires_custom_indexes = pytest.mark.xfail(SCYLLA_VERSION is not None,
                                             reason='Scylla does not support SASI or any other CUSTOM INDEX class')
+requires_java_udf = pytest.mark.xfail(SCYLLA_VERSION is not None,
+                                      reason='Scylla does not support UDFs written in Java')
+requires_composite_type = pytest.mark.xfail(SCYLLA_VERSION is not None,
+                                            reason='Scylla does not support composite types')
+requires_custom_payload = pytest.mark.xfail(SCYLLA_VERSION is not None or PROTOCOL_VERSION < 4,
+                                            reason='Scylla does not support custom payloads.
Cassandra requires native protocol v4.0+') +xfail_scylla = lambda reason, *args, **kwargs: pytest.mark.xfail(SCYLLA_VERSION is not None, reason=reason, *args, **kwargs) +incorrect_test = lambda reason='This test seems to be incorrect and should be fixed', *args, **kwargs: pytest.mark.xfail(reason=reason, *args, **kwargs) pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy") notpy3 = unittest.skipIf(sys.version_info >= (3, 0), "Test not applicable for Python 3.x runtime") diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index 148c2b1187..6d5e040e32 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -18,7 +18,8 @@ import six from cassandra.query import BatchStatement -from tests.integration import use_singledc, PROTOCOL_VERSION, local, TestCluster +from tests.integration import (use_singledc, PROTOCOL_VERSION, local, TestCluster, + requires_custom_payload, xfail_scylla) def setup_module(): @@ -27,7 +28,7 @@ def setup_module(): # Failing with scylla because there is no warning message when changing the value of 'batch_size_warn_threshold_in_kb' # config") -@unittest.expectedFailure +@xfail_scylla('Empty warnings: TypeError: object of type \'NoneType\' has no len()') class ClientWarningTests(unittest.TestCase): @classmethod @@ -94,6 +95,7 @@ def test_warning_with_trace(self): self.assertIsNotNone(future.get_query_trace()) @local + @requires_custom_payload def test_warning_with_custom_payload(self): """ Test to validate client warning with custom payload @@ -113,6 +115,7 @@ def test_warning_with_custom_payload(self): self.assertDictEqual(future.custom_payload, payload) @local + @requires_custom_payload def test_warning_with_trace_and_custom_payload(self): """ Test to validate client warning with tracing and client warning diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 76978038ea..195c112ffd 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -42,8 +42,8 @@ from tests import notwindows from tests.integration import use_singledc, get_server_versions, CASSANDRA_VERSION, \ execute_until_pass, execute_with_long_wait_retry, get_node, MockLoggingHandler, get_unsupported_lower_protocol, \ - get_unsupported_upper_protocol, protocolv6, local, CASSANDRA_IP, greaterthanorequalcass30, lessthanorequalcass40, \ - DSE_VERSION, TestCluster, PROTOCOL_VERSION + get_unsupported_upper_protocol, lessthanprotocolv3, protocolv6, local, CASSANDRA_IP, greaterthanorequalcass30, \ + lessthanorequalcass40, DSE_VERSION, TestCluster, PROTOCOL_VERSION, xfail_scylla, incorrect_test from tests.integration.util import assert_quiescent_pool_state import sys @@ -289,8 +289,7 @@ def test_protocol_negotiation(self): cluster.shutdown() - # "Failing with scylla because there is option to create a cluster with 'lower bound' protocol - @unittest.expectedFailure + @xfail_scylla("Failing with scylla because there is option to create a cluster with 'lower bound' protocol") def test_invalid_protocol_negotation(self): """ Test for protocol negotiation when explicit versions are set @@ -411,12 +410,11 @@ def test_connect_to_bad_hosts(self): protocol_version=PROTOCOL_VERSION) self.assertRaises(NoHostAvailable, cluster.connect) + @lessthanprotocolv3 def test_cluster_settings(self): """ Test connection setting getters and setters """ - if 
PROTOCOL_VERSION >= 3: - raise unittest.SkipTest("min/max requests and core/max conns aren't used with v3 protocol") cluster = TestCluster() @@ -1228,8 +1226,7 @@ def test_replicas_are_queried(self): @greaterthanorequalcass30 @lessthanorequalcass40 - # The scylla failed because 'Unknown identifier column1' - @unittest.expectedFailure + @incorrect_test() def test_compact_option(self): """ Test the driver can connect with the no_compact option and the results diff --git a/tests/integration/standard/test_custom_payload.py b/tests/integration/standard/test_custom_payload.py index 20efe1c79a..fd0a94c419 100644 --- a/tests/integration/standard/test_custom_payload.py +++ b/tests/integration/standard/test_custom_payload.py @@ -19,7 +19,8 @@ from cassandra.query import (SimpleStatement, BatchStatement, BatchType) -from tests.integration import use_singledc, PROTOCOL_VERSION, local, TestCluster +from tests.integration import (use_singledc, PROTOCOL_VERSION, local, TestCluster, + requires_custom_payload) def setup_module(): @@ -28,13 +29,10 @@ def setup_module(): #These test rely on the custom payload being returned but by default C* #ignores all the payloads. @local +@requires_custom_payload class CustomPayloadTests(unittest.TestCase): def setUp(self): - if PROTOCOL_VERSION < 4: - raise unittest.SkipTest( - "Native protocol 4,0+ is required for custom payloads, currently using %r" - % (PROTOCOL_VERSION,)) self.cluster = TestCluster() self.session = self.cluster.connect() @@ -43,7 +41,6 @@ def tearDown(self): self.cluster.shutdown() # Scylla error: 'truncated frame: expected 65540 bytes, length is 64' - @unittest.expectedFailure def test_custom_query_basic(self): """ Test to validate that custom payloads work with simple queries @@ -67,7 +64,6 @@ def test_custom_query_basic(self): self.validate_various_custom_payloads(statement=statement) # Scylla error: 'Invalid query kind in BATCH messages. 
Must be 0 or 1 but got 4'" - @unittest.expectedFailure def test_custom_query_batching(self): """ Test to validate that custom payloads work with batch queries @@ -94,7 +90,6 @@ def test_custom_query_batching(self): # Scylla error: 'Got different query ID in server response (b'\x00') than we had before # (b'\x84P\xd0K0\xe2=\x11\xba\x02\x16W\xfatN\xf1')'") - @unittest.expectedFailure def test_custom_query_prepared(self): """ Test to validate that custom payloads work with prepared queries diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index eef89b642c..2a77ec1092 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -39,7 +39,8 @@ get_supported_protocol_versions, greaterthancass20, greaterthancass21, assert_startswith, greaterthanorequalcass40, greaterthanorequaldse67, lessthancass40, - TestCluster, DSE_VERSION) + TestCluster, DSE_VERSION, requires_java_udf, requires_composite_type, + requires_collection_indexes, xfail_scylla) from tests.util import wait_until @@ -474,7 +475,7 @@ def test_counter_with_dense_compact_storage(self): tablemeta = self.get_table_metadata() self.check_create_statement(tablemeta, create_statement) - @unittest.expectedFailure + @xfail_scylla('https://github.com/scylladb/scylladb/issues/6058') def test_indexes(self): create_statement = self.make_create_statement(["a"], ["b", "c"], ["d", "e", "f"]) create_statement += " WITH CLUSTERING ORDER BY (b ASC, c ASC)" @@ -500,7 +501,7 @@ def test_indexes(self): self.assertIn('CREATE INDEX e_index', statement) @greaterthancass21 - @unittest.expectedFailure + @requires_collection_indexes def test_collection_indexes(self): self.session.execute("CREATE TABLE %s.%s (a int PRIMARY KEY, b map)" @@ -530,7 +531,8 @@ def test_collection_indexes(self): tablemeta = self.get_table_metadata() self.assertIn('(full(b))', tablemeta.export_as_string()) - @unittest.expectedFailure + #TODO: Fix Scylla or test + @xfail_scylla('Scylla prints `compression = {}` instead of `compression = {\'enabled\': \'false\'}`.') def test_compression_disabled(self): create_statement = self.make_create_statement(["a"], ["b"], ["c"]) create_statement += " WITH compression = {}" @@ -565,7 +567,7 @@ def test_non_size_tiered_compaction(self): self.assertNotIn("min_threshold", cql) self.assertNotIn("max_threshold", cql) - @unittest.expectedFailure + @requires_java_udf def test_refresh_schema_metadata(self): """ test for synchronously refreshing all cluster metadata @@ -838,7 +840,7 @@ def test_refresh_user_type_metadata_proto_2(self): self.assertEqual(cluster.metadata.keyspaces[self.keyspace_name].user_types, {}) cluster.shutdown() - @unittest.expectedFailure + @requires_java_udf def test_refresh_user_function_metadata(self): """ test for synchronously refreshing UDF metadata in keyspace @@ -875,7 +877,7 @@ def test_refresh_user_function_metadata(self): cluster2.shutdown() - @unittest.expectedFailure + @requires_java_udf def test_refresh_user_aggregate_metadata(self): """ test for synchronously refreshing UDA metadata in keyspace @@ -919,7 +921,7 @@ def test_refresh_user_aggregate_metadata(self): cluster2.shutdown() @greaterthanorequalcass30 - @unittest.expectedFailure + @requires_collection_indexes def test_multiple_indices(self): """ test multiple indices on the same column. 
@@ -1544,7 +1546,7 @@ def __init__(self, test_case, **kwargs): super(FunctionTest.VerifiedAggregate, self).__init__(test_case, Aggregate, test_case.keyspace_aggregate_meta, **kwargs) -@unittest.expectedFailure +@requires_java_udf class FunctionMetadata(FunctionTest): def make_function_kwargs(self, called_on_null=True): @@ -1699,6 +1701,7 @@ def test_function_cql_called_on_null(self): self.assertRegex(fn_meta.as_cql_query(), "CREATE FUNCTION.*\) RETURNS NULL ON NULL INPUT RETURNS .*") +@requires_java_udf class AggregateMetadata(FunctionTest): @classmethod @@ -1743,7 +1746,6 @@ def make_aggregate_kwargs(self, state_func, state_type, final_func=None, init_co 'return_type': "does not matter for creation", 'deterministic': False} - @unittest.expectedFailure def test_return_type_meta(self): """ Test to verify to that the return type of a an aggregate is honored in the metadata @@ -1761,7 +1763,6 @@ def test_return_type_meta(self): with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond='1')) as va: self.assertEqual(self.keyspace_aggregate_meta[va.signature].return_type, 'int') - @unittest.expectedFailure def test_init_cond(self): """ Test to verify that various initial conditions are correctly surfaced in various aggregate functions @@ -1812,7 +1813,6 @@ def test_init_cond(self): self.assertDictContainsSubset(init_not_updated, map_res) c.shutdown() - @unittest.expectedFailure def test_aggregates_after_functions(self): """ Test to verify that aggregates are listed after function in metadata @@ -1835,7 +1835,6 @@ def test_aggregates_after_functions(self): self.assertNotIn(-1, (aggregate_idx, func_idx), "AGGREGATE or FUNCTION not found in keyspace_cql: " + keyspace_cql) self.assertGreater(aggregate_idx, func_idx) - @unittest.expectedFailure def test_same_name_diff_types(self): """ Test to verify to that aggregates with different signatures are differentiated in metadata @@ -1858,7 +1857,6 @@ def test_same_name_diff_types(self): self.assertEqual(len(aggregates), 2) self.assertNotEqual(aggregates[0].argument_types, aggregates[1].argument_types) - @unittest.expectedFailure def test_aggregates_follow_keyspace_alter(self): """ Test to verify to that aggregates maintain equality after a keyspace is altered @@ -1883,7 +1881,6 @@ def test_aggregates_follow_keyspace_alter(self): finally: self.session.execute('ALTER KEYSPACE %s WITH durable_writes = true' % self.keyspace_name) - @unittest.expectedFailure def test_cql_optional_params(self): """ Test to verify that the initial_cond and final_func parameters are correctly honored @@ -2018,7 +2015,7 @@ def test_bad_user_type(self): self.assertIn("/*\nWarning:", m.export_as_string()) @greaterthancass21 - @unittest.expectedFailure + @requires_java_udf def test_bad_user_function(self): self.session.execute("""CREATE FUNCTION IF NOT EXISTS %s (key int, val int) RETURNS NULL ON NULL INPUT @@ -2037,7 +2034,7 @@ def test_bad_user_function(self): self.assertIn("/*\nWarning:", m.export_as_string()) @greaterthancass21 - @unittest.expectedFailure + @requires_java_udf def test_bad_user_aggregate(self): self.session.execute("""CREATE FUNCTION IF NOT EXISTS sum_int (key int, val int) RETURNS NULL ON NULL INPUT @@ -2058,7 +2055,7 @@ def test_bad_user_aggregate(self): class DynamicCompositeTypeTest(BasicSharedKeyspaceUnitTestCase): - @unittest.expectedFailure + @requires_composite_type def test_dct_alias(self): """ Tests to make sure DCT's have correct string formatting diff --git a/tests/integration/standard/test_query.py 
b/tests/integration/standard/test_query.py
index 801ee0fd7c..47c4ca2ef2 100644
--- a/tests/integration/standard/test_query.py
+++ b/tests/integration/standard/test_query.py
@@ -25,7 +25,7 @@
 from cassandra.policies import HostDistance, RoundRobinPolicy, WhiteListRoundRobinPolicy
 from tests.integration import use_singledc, PROTOCOL_VERSION, BasicSharedKeyspaceUnitTestCase, \
     greaterthanprotocolv3, MockLoggingHandler, get_supported_protocol_versions, local, get_cluster, setup_keyspace, \
-    USE_CASS_EXTERNAL, greaterthanorequalcass40, DSE_VERSION, TestCluster, requirecassandra
+    USE_CASS_EXTERNAL, greaterthanorequalcass40, DSE_VERSION, TestCluster, requirecassandra, xfail_scylla
 from tests import notwindows
 from tests.integration import greaterthanorequalcass30, get_node
 
@@ -950,8 +950,7 @@ def test_no_connection_refused_on_timeout(self):
         # Make sure test passed
         self.assertTrue(received_timeout)
 
-    # Failed on Scylla because error `SERIAL/LOCAL_SERIAL consistency may only be requested for one partition at a time`
-    @unittest.expectedFailure
+    @xfail_scylla('Fails on Scylla with error `SERIAL/LOCAL_SERIAL consistency may only be requested for one partition at a time`')
     def test_was_applied_batch_stmt(self):
         """
         Test to ensure `:attr:cassandra.cluster.ResultSet.was_applied` works as expected
diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py
index aeec419913..bc26a3013e 100644
--- a/tests/integration/standard/test_types.py
+++ b/tests/integration/standard/test_types.py
@@ -31,7 +31,7 @@
 
 from tests.integration import use_singledc, execute_until_pass, notprotocolv1, \
     BasicSharedKeyspaceUnitTestCase, greaterthancass21, lessthancass30, greaterthanorequaldse51, \
-    DSE_VERSION, greaterthanorequalcass3_10, requiredse, TestCluster
+    DSE_VERSION, greaterthanorequalcass3_10, requiredse, TestCluster, requires_composite_type
 from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, PRIMITIVE_DATATYPES_KEYS, \
     get_sample, get_all_samples, get_collection_sample
 
@@ -731,7 +731,7 @@ def test_can_insert_unicode_query_string(self):
         s.execute(u"SELECT * FROM system.local WHERE key = 'ef\u2052ef'")
         s.execute(u"SELECT * FROM system.local WHERE key = %s", (u"fe\u2051fe",))
 
-    @unittest.expectedFailure
+    @requires_composite_type
     def test_can_read_composite_type(self):
         """
         Test to ensure that CompositeTypes can be used in a query

From a8faa90093b398ea3f7938e12ca005269576e1d0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Karol=20Bary=C5=82a?=
Date: Tue, 28 Mar 2023 16:02:13 +0200
Subject: [PATCH 155/551] Use pytest.mark.xfail instead of
 unittest.expectedFailure.

The former has a `reason` argument, and this reason is shown in the test
report - so it's easier to judge whether the test should really fail.
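A minimal before/after sketch (hypothetical test; the reason string is
taken from one of the hunks below):

```
import unittest
import pytest

class ReplicaTests(unittest.TestCase):
    # Before: @unittest.expectedFailure gave no way to record why the
    # failure was expected.
    # After: the reason travels with the marker and surfaces in the
    # pytest report.
    @pytest.mark.xfail(reason="AssertionError: 'RAC1' != 'r1' - probably a bug in driver or in Scylla")
    def test_replicas(self):
        self.fail('expected to fail for now')
```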
--- tests/integration/standard/test_metadata.py | 5 +++-- tests/integration/standard/test_query.py | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 2a77ec1092..f95f510d9b 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -23,6 +23,7 @@ import os from packaging.version import Version from mock import Mock, patch +import pytest from cassandra import AlreadyExists, SignatureDescriptor, UserFunctionDescriptor, UserAggregateDescriptor @@ -1209,7 +1210,7 @@ def test_export_keyspace_schema_udts(self): cluster.shutdown() @greaterthancass21 - @unittest.expectedFailure + @pytest.mark.xfail(reason='Column name in CREATE INDEX is not quoted. It\'s a bug in driver or in Scylla') def test_case_sensitivity(self): """ Test that names that need to be escaped in CREATE statements are @@ -1279,7 +1280,7 @@ def test_already_exists_exceptions(self): cluster.shutdown() @local - @unittest.expectedFailure + @pytest.mark.xfail(reason='AssertionError: \'RAC1\' != \'r1\' - probably a bug in driver or in Scylla') def test_replicas(self): """ Ensure cluster.metadata.get_replicas return correctly when not attached to keyspace diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index 47c4ca2ef2..fdab4e7a0a 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -17,6 +17,7 @@ import unittest import logging +import pytest from cassandra import ProtocolVersion from cassandra import ConsistencyLevel, Unavailable, InvalidRequest, cluster from cassandra.query import (PreparedStatement, BoundStatement, SimpleStatement, @@ -1036,8 +1037,7 @@ def test_empty_batch_statement(self): with self.assertRaises(RuntimeError): results.was_applied - # Skipping until PYTHON-943 is resolved - @unittest.expectedFailure + @pytest.mark.xfail(reason='Skipping until PYTHON-943 is resolved') def test_was_applied_batch_string(self): batch_statement = BatchStatement(BatchType.LOGGED) batch_statement.add_all(["INSERT INTO test3rf.lwt_clustering (k, c, v) VALUES (0, 0, 10);", From bafab046f6b23eb5603abdb112ab874d4740d7ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Tue, 28 Mar 2023 16:21:56 +0200 Subject: [PATCH 156/551] Skip tests requiring iptables. Tests running `sudo` are problematic to run locally during development, as you can't just run the tests - you need to watch them and wait for sudo prompt. In this test, it would be better to use some kind of proxy. As the test was already marked xfail, skip it to make development easier. 
--- tests/integration/standard/test_shard_aware.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/standard/test_shard_aware.py b/tests/integration/standard/test_shard_aware.py index ef2348d1b2..2234e74df4 100644 --- a/tests/integration/standard/test_shard_aware.py +++ b/tests/integration/standard/test_shard_aware.py @@ -26,6 +26,7 @@ import unittest2 as unittest except ImportError: import unittest # noqa +import pytest from cassandra.cluster import Cluster from cassandra.policies import TokenAwarePolicy, RoundRobinPolicy, ConstantReconnectionPolicy @@ -188,7 +189,7 @@ def test_closing_connections(self): time.sleep(10) self.query_data(self.session) - @unittest.expectedFailure + @pytest.mark.skip def test_blocking_connections(self): """ Verify that reconnection is working as expected, when connection are being blocked. From dca3ab7915e07dce141647ea90c950d6a5051149 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Tue, 28 Mar 2023 16:22:52 +0200 Subject: [PATCH 157/551] Fix a typo in shard aware tests --- tests/integration/standard/test_shard_aware.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/standard/test_shard_aware.py b/tests/integration/standard/test_shard_aware.py index 2234e74df4..d68e53801c 100644 --- a/tests/integration/standard/test_shard_aware.py +++ b/tests/integration/standard/test_shard_aware.py @@ -39,7 +39,7 @@ def setup_module(): os.environ['SCYLLA_EXT_OPTS'] = "--smp 4 --memory 2048M" - use_cluster('shared_aware', [3], start=True) + use_cluster('shard_aware', [3], start=True) class TestShardAwareIntegration(unittest.TestCase): From 1c886fdc68175a6f72720204f2789adb2b46a6d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Tue, 28 Mar 2023 16:23:27 +0200 Subject: [PATCH 158/551] test_shard_aware: Copy values from Java driver. Use shard values (keys, shard number) from Java driver tests. Java driver uses values compatible with smp=2, so with those values we'll be able to run those tests in CI (it has only 2 cores). 
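For context, a rough Python sketch of the shard mapping these values rely
on, mirroring the biased-token-round-robin logic in
cassandra/c_shard_info.pyx (the final multiply-and-shift step is spelled
out here for illustration):

    def shard_id_from_token(token, shards_count, sharding_ignore_msb):
        # Bias the signed Murmur3 token into uint64 range, drop the
        # configured number of most significant bits, then scale the
        # result down to [0, shards_count).
        mask = 0xffffffffffffffff
        biased_token = (token + (1 << 63)) & mask
        biased_token = (biased_token << sharding_ignore_msb) & mask
        return (biased_token * shards_count) >> 64

With --smp 2 there are only two shards, so the keys have to be chosen to
land on different shards - which is what the values borrowed from the Java
driver tests guarantee.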
--- tests/integration/standard/test_shard_aware.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/integration/standard/test_shard_aware.py b/tests/integration/standard/test_shard_aware.py index d68e53801c..ca689c01d4 100644 --- a/tests/integration/standard/test_shard_aware.py +++ b/tests/integration/standard/test_shard_aware.py @@ -38,7 +38,7 @@ def setup_module(): - os.environ['SCYLLA_EXT_OPTS'] = "--smp 4 --memory 2048M" + os.environ['SCYLLA_EXT_OPTS'] = "--smp 2" use_cluster('shard_aware', [3], start=True) @@ -109,7 +109,7 @@ def create_data(session): session.execute(bound) bound = prepared.bind(('e', 'f', 'g')) session.execute(bound) - bound = prepared.bind(('100000', 'f', 'g')) + bound = prepared.bind(('100002', 'f', 'g')) session.execute(bound) def query_data(self, session, verify_in_tracing=True): @@ -122,20 +122,20 @@ def query_data(self, session, verify_in_tracing=True): results = session.execute(bound, trace=True) self.assertEqual(results, [('a', 'b', 'c')]) if verify_in_tracing: - self.verify_same_shard_in_tracing(results, "shard 1") + self.verify_same_shard_in_tracing(results, "shard 0") - bound = prepared.bind(('100000', 'f')) + bound = prepared.bind(('100002', 'f')) results = session.execute(bound, trace=True) - self.assertEqual(results, [('100000', 'f', 'g')]) + self.assertEqual(results, [('100002', 'f', 'g')]) if verify_in_tracing: - self.verify_same_shard_in_tracing(results, "shard 0") + self.verify_same_shard_in_tracing(results, "shard 1") bound = prepared.bind(('e', 'f')) results = session.execute(bound, trace=True) if verify_in_tracing: - self.verify_same_shard_in_tracing(results, "shard 1") + self.verify_same_shard_in_tracing(results, "shard 0") def test_all_tracing_coming_one_shard(self): """ From dadf94959e2794279e50cd468028eff2a7f56942 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Tue, 28 Mar 2023 17:22:57 +0200 Subject: [PATCH 159/551] Enable tests/integration/standard/ in CI All the tests should be passing now, so they can be enabled. --- .github/workflows/integration-tests.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index ca6e8a1c14..c16a7a8279 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -21,5 +21,4 @@ jobs: - name: Test with pytest run: | - ./ci/run_integration_test.sh tests/integration/standard/test_authentication.py tests/integration/standard/test_cluster.py tests/integration/standard/test_concurrent.py tests/integration/standard/test_connection.py tests/integration/standard/test_control_connection.py tests/integration/standard/test_custom_payload.py tests/integration/standard/test_custom_protocol_handler.py tests/integration/standard/test_cython_protocol_handlers.py tests/integration/standard/test_scylla_cloud.py tests/integration/standard/test_use_keyspace.py tests/integration/standard/test_ip_change.py tests/integration/cqlengine/ - # can't run this, cause only 2 cpus on github actions: tests/integration/standard/test_shard_aware.py + ./ci/run_integration_test.sh tests/integration/standard/ tests/integration/cqlengine/ From 683723347a712391ebbed0be7ee1750e328ee8f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Thu, 30 Mar 2023 16:59:33 +0200 Subject: [PATCH 160/551] Fix failing test in test_cluster.py One of the tests was failing in CI, even after previous fix. 
This is probably caused by cluster being reused, instead of being recreated with newly set env option. --- tests/integration/standard/test_cluster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 195c112ffd..43a1d080ee 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -40,7 +40,7 @@ from cassandra.connection import DefaultEndPoint from tests import notwindows -from tests.integration import use_singledc, get_server_versions, CASSANDRA_VERSION, \ +from tests.integration import use_cluster, get_server_versions, CASSANDRA_VERSION, \ execute_until_pass, execute_with_long_wait_retry, get_node, MockLoggingHandler, get_unsupported_lower_protocol, \ get_unsupported_upper_protocol, lessthanprotocolv3, protocolv6, local, CASSANDRA_IP, greaterthanorequalcass30, \ lessthanorequalcass40, DSE_VERSION, TestCluster, PROTOCOL_VERSION, xfail_scylla, incorrect_test @@ -52,7 +52,7 @@ def setup_module(): os.environ['SCYLLA_EXT_OPTS'] = "--smp 1" - use_singledc() + use_cluster("cluster_tests", [3], start=True, workloads=None) warnings.simplefilter("always") From fd44512aed48de4146e8829302b6dacb52aa3c91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Thu, 13 Apr 2023 16:21:46 +0200 Subject: [PATCH 161/551] test_shard_aware.py: print tracing source When analyzing output of failed test it is useful to know which node emitted which message. --- tests/integration/standard/test_shard_aware.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/standard/test_shard_aware.py b/tests/integration/standard/test_shard_aware.py index ca689c01d4..01b755a0f3 100644 --- a/tests/integration/standard/test_shard_aware.py +++ b/tests/integration/standard/test_shard_aware.py @@ -60,7 +60,7 @@ def verify_same_shard_in_tracing(self, results, shard_name): traces = results.get_query_trace() events = traces.events for event in events: - LOGGER.info("%s %s", event.thread_name, event.description) + LOGGER.info("%s %s %s", event.source, event.thread_name, event.description) for event in events: self.assertEqual(event.thread_name, shard_name) self.assertIn('querying locally', "\n".join([event.description for event in events])) From 40120f284c3857447d6e58b8518cdff698381295 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Thu, 13 Apr 2023 16:22:27 +0200 Subject: [PATCH 162/551] test_shard_aware.py: skip failing test test_closing_connections is failing, for multiple reasons. Skip it until proper investigation can be performed. --- tests/integration/standard/test_shard_aware.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/standard/test_shard_aware.py b/tests/integration/standard/test_shard_aware.py index 01b755a0f3..e3d2681a5c 100644 --- a/tests/integration/standard/test_shard_aware.py +++ b/tests/integration/standard/test_shard_aware.py @@ -168,6 +168,7 @@ def test_connect_from_multiple_clients(self): for result in as_completed(futures): print(result) + @pytest.mark.skip(reason='https://github.com/scylladb/python-driver/issues/221') def test_closing_connections(self): """ Verify that reconnection is working as expected, when connection are being closed. 
From 5761efc92816b50bec42293e510b7ae960eeabb9 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Tue, 30 May 2023 13:33:14 +0300 Subject: [PATCH 163/551] Release 3.26.2 --- cassandra/__init__.py | 2 +- docs/conf.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 84b459fc98..d5b1944cfd 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -22,7 +22,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 26, 1) +__version_info__ = (3, 26, 2) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/conf.py b/docs/conf.py index 9584c7556a..94eb076275 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -11,10 +11,10 @@ # -- General configuration ----------------------------------------------------- # Build documentation for the following tags and branches -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.1-scylla'] +TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.2-scylla'] BRANCHES = ['master'] # Set the latest version. -LATEST_VERSION = '3.26.1-scylla' +LATEST_VERSION = '3.26.2-scylla' # Set which versions are not released yet. UNSTABLE_VERSIONS = ['master'] # Set which versions are deprecated From cb2f91ee9d3c4f1fc5fdc9870bb83625a12edbf2 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Fri, 2 Jun 2023 11:05:35 +0100 Subject: [PATCH 164/551] docs: update theme 1.5.1 --- docs/conf.py | 3 ++- docs/pyproject.toml | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 94eb076275..ec6d2b2dd0 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -102,7 +102,7 @@ # -- Options for sitemap extension --------------------------------------- -sitemap_url_scheme = 'stable/{link}' +sitemap_url_scheme = "/stable/{link}" # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the
@@ -113,6 +113,7 @@
     'github_issues_repository': 'scylladb/python-driver',
     'hide_edit_this_page_button': 'false',
     'hide_version_dropdown': ['master'],
+    'hide_feedback_buttons': 'false',
     'versions_unstable': UNSTABLE_VERSIONS,
     'versions_deprecated': DEPRECATED_VERSIONS,
 }
diff --git a/docs/pyproject.toml b/docs/pyproject.toml
index 4bca5f9db5..4a1656322b 100644
--- a/docs/pyproject.toml
+++ b/docs/pyproject.toml
@@ -13,12 +13,12 @@ gevent = "20.12.1"
 gremlinpython = "3.4.7"
 python = "^3.7"
 pyyaml = "6.0"
-pygments = "2.2.0"
+pygments = "2.15.1"
 recommonmark = "0.7.1"
 redirects_cli ="~0.1.2"
 sphinx-autobuild = "2021.3.14"
-sphinx-sitemap = "2.1.0"
-sphinx-scylladb-theme = "~1.4.1"
+sphinx-sitemap = "2.5.0"
+sphinx-scylladb-theme = "~1.5.1"
 sphinx-multiversion-scylla = "~0.2.11"
 Sphinx = "4.3.2"
 scales = "1.0.9"

From eeb6ddc9519a99c3fe969c7daa8e14ea299da670 Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Sun, 25 Jun 2023 19:58:38 +0300
Subject: [PATCH 165/551] test_scylla_cloud: align with ccm

ccm added an implementation of multi-DC support for sni_proxy and changed
`sni_proxy_docker_id` to a list, `sni_proxy_docker_ids`.
Since the code in the test was using the new list, the sni_proxy
was stopped and removed, causing the next test to fail, since it would
reuse the sni_proxy.
---
 tests/integration/standard/test_scylla_cloud.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/integration/standard/test_scylla_cloud.py b/tests/integration/standard/test_scylla_cloud.py
index 94fb07290e..751bf656c3 100644
--- a/tests/integration/standard/test_scylla_cloud.py
+++ b/tests/integration/standard/test_scylla_cloud.py
@@ -41,7 +41,7 @@ def start_cluster_with_proxy(self):
         docker_id, listen_address, listen_port = \
             start_sni_proxy(ccm_cluster.get_path(), nodes_info=nodes_info, listen_port=sni_port)

-        ccm_cluster.sni_proxy_docker_id = docker_id
+        ccm_cluster.sni_proxy_docker_ids = [docker_id]
         ccm_cluster.sni_proxy_listen_port = listen_port
         ccm_cluster._update_config()

From 6eec22bc3d2653ff28a40b1659bbfb3279bc8eec Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Karol=20Bary=C5=82a?=
Date: Fri, 30 Jun 2023 19:33:04 +0200
Subject: [PATCH 166/551] Disable MisconfiguredAuthenticationTests

This test is sometimes failing in CI. Needs investigation.
---
 .../standard/test_authentication_misconfiguration.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/integration/standard/test_authentication_misconfiguration.py b/tests/integration/standard/test_authentication_misconfiguration.py
index 546141d801..f5a9cebcdf 100644
--- a/tests/integration/standard/test_authentication_misconfiguration.py
+++ b/tests/integration/standard/test_authentication_misconfiguration.py
@@ -13,10 +13,13 @@
 # limitations under the License.

 import unittest
+import pytest

 from tests.integration import USE_CASS_EXTERNAL, use_cluster, TestCluster


+@pytest.mark.skip(reason="Flaky test - needs investigation whether its Scylla's or driver's fault."
+                         "Issue: https://github.com/scylladb/python-driver/issues/236")
 class MisconfiguredAuthenticationTests(unittest.TestCase):
     """ One node (not the contact point) has password auth.
The rest of the nodes have no auth """

     @classmethod

From fa4fa5e1da413f4c3c46d854fb4a16e1cef963cd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Karol=20Bary=C5=82a?=
Date: Mon, 3 Jul 2023 09:22:02 +0200
Subject: [PATCH 167/551] Disable test_metadata.py::SchemaMetadataTests::test_indexes

This test was marked as xfail, but it turns out it sometimes passes,
so this commit skips it completely.
---
 tests/integration/standard/test_metadata.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py
index f95f510d9b..5e3219a23e 100644
--- a/tests/integration/standard/test_metadata.py
+++ b/tests/integration/standard/test_metadata.py
@@ -476,7 +476,7 @@ def test_counter_with_dense_compact_storage(self):
         tablemeta = self.get_table_metadata()
         self.check_create_statement(tablemeta, create_statement)

-    @xfail_scylla('https://github.com/scylladb/scylladb/issues/6058')
+    @pytest.mark.skip(reason='https://github.com/scylladb/scylladb/issues/6058')
     def test_indexes(self):
         create_statement = self.make_create_statement(["a"], ["b", "c"], ["d", "e", "f"])
         create_statement += " WITH CLUSTERING ORDER BY (b ASC, c ASC)"

From db55247b2194c8edc1ff73fa66088c333830549e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Karol=20Bary=C5=82a?=
Date: Thu, 6 Jul 2023 14:19:38 +0200
Subject: [PATCH 168/551] Fix obvious copy-mistake in FunctionAndAggregateMetadataTests
---
 tests/integration/advanced/test_adv_metadata.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/integration/advanced/test_adv_metadata.py b/tests/integration/advanced/test_adv_metadata.py
index 8228bfe220..66f682fd49 100644
--- a/tests/integration/advanced/test_adv_metadata.py
+++ b/tests/integration/advanced/test_adv_metadata.py
@@ -46,7 +46,7 @@ def setUpClass(cls):
     @classmethod
     def tearDownClass(cls):
         if DSE_VERSION:
-            super(FunctionAndAggregateMetadataTests, cls).setUpClass()
+            super(FunctionAndAggregateMetadataTests, cls).tearDownClass()

     def setUp(self):
         self.func_name = self.function_table_name + '_func'

From fab6b915d800312f3549b8d6c6140f31901c4b2c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Karol=20Bary=C5=82a?=
Date: Thu, 6 Jul 2023 14:19:49 +0200
Subject: [PATCH 169/551] Skip Java UDF tests on Scylla instead of xfailing them

I suspect that executing those tests is causing other failures we see in
CI. Also, executing them on Scylla is pointless, since we don't support
the functionality they use, so it just makes the pipeline run longer.
---
 tests/integration/__init__.py               | 10 +++++-----
 tests/integration/standard/test_metadata.py |  2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
index 9a40a62d59..cc85289881 100644
--- a/tests/integration/__init__.py
+++ b/tests/integration/__init__.py
@@ -372,15 +372,15 @@ def _id_and_mark(f):
 # 1. unittest doesn't skip setUpClass when used on class and we need it sometimes
 # 2. unittest doesn't have conditional xfail, and I prefer to use pytest than custom decorator
 # 3.
unittest doesn't have a reason argument, so you don't see the reason in pytest report
-requires_collection_indexes = pytest.mark.xfail(SCYLLA_VERSION is not None and Version(SCYLLA_VERSION.split(':')[1]) < Version('5.2'),
+requires_collection_indexes = pytest.mark.skipif(SCYLLA_VERSION is not None and Version(SCYLLA_VERSION.split(':')[1]) < Version('5.2'),
                                                  reason='Scylla supports collection indexes from 5.2 onwards')
-requires_custom_indexes = pytest.mark.xfail(SCYLLA_VERSION is not None,
+requires_custom_indexes = pytest.mark.skipif(SCYLLA_VERSION is not None,
                                             reason='Scylla does not support SASI or any other CUSTOM INDEX class')
-requires_java_udf = pytest.mark.xfail(SCYLLA_VERSION is not None,
+requires_java_udf = pytest.mark.skipif(SCYLLA_VERSION is not None,
                                       reason='Scylla does not support UDFs written in Java')
-requires_composite_type = pytest.mark.xfail(SCYLLA_VERSION is not None,
+requires_composite_type = pytest.mark.skipif(SCYLLA_VERSION is not None,
                                             reason='Scylla does not support composite types')
-requires_custom_payload = pytest.mark.xfail(SCYLLA_VERSION is not None or PROTOCOL_VERSION < 4,
+requires_custom_payload = pytest.mark.skipif(SCYLLA_VERSION is not None or PROTOCOL_VERSION < 4,
                                             reason='Scylla does not support custom payloads. Cassandra requires native protocol v4.0+')
 xfail_scylla = lambda reason, *args, **kwargs: pytest.mark.xfail(SCYLLA_VERSION is not None, reason=reason, *args, **kwargs)
 incorrect_test = lambda reason='This test seems to be incorrect and should be fixed', *args, **kwargs: pytest.mark.xfail(reason=reason, *args, **kwargs)

diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py
index 5e3219a23e..c561491ab4 100644
--- a/tests/integration/standard/test_metadata.py
+++ b/tests/integration/standard/test_metadata.py
@@ -1474,7 +1474,7 @@ def test_index_follows_alter(self):
             self.assertIsInstance(table_meta.indexes[idx], IndexMetadata)
         self.drop_basic_table()

-
+@requires_java_udf
 class FunctionTest(unittest.TestCase):
     """
     Base functionality for Function and Aggregate metadata test classes

From 28b0dd1a83e15140f3ab840b27f9a575ac9b2edf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Karol=20Bary=C5=82a?=
Date: Thu, 13 Jul 2023 17:33:28 +0200
Subject: [PATCH 170/551] cqltypes: Serialize None values in collections as NULLs

Fixes https://github.com/scylladb/python-driver/issues/201

When using prepared statements, None values in collections were
serialized as empty values (values with length == 0). This is unexpected
and inconsistent - None values are serialized as NULLs (values with
length == -1) in other cases:
- Statement arguments, both for simple and prepared statements
- Collection elements in simple statements

This commit fixes this weird behavior - now None values should be
serialized as NULLs in all cases. It also adds an integration test
that checks the new behavior.
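To illustrate the difference at the byte level (a sketch of the
length-prefixed element encoding, not code from this patch):

    import struct

    def pack(n):
        return struct.pack('>i', n)  # signed 32-bit length prefix

    null_element = pack(-1)              # what the driver now writes for None
    empty_element = pack(0)              # what it wrote before this fix
    value = b'abc'
    element = pack(len(value)) + value   # a regular serialized element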
--- cassandra/cqltypes.py | 27 +++++++++----- tests/integration/standard/test_types.py | 47 +++++++++++++++++++++++- 2 files changed, 64 insertions(+), 10 deletions(-) diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index 8167b3b894..c2c0d9f905 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -832,9 +832,12 @@ def serialize_safe(cls, items, protocol_version): buf.write(pack(len(items))) inner_proto = max(3, protocol_version) for item in items: - itembytes = subtype.to_binary(item, inner_proto) - buf.write(pack(len(itembytes))) - buf.write(itembytes) + if item is None: + buf.write(pack(-1)) + else: + itembytes = subtype.to_binary(item, inner_proto) + buf.write(pack(len(itembytes))) + buf.write(itembytes) return buf.getvalue() @@ -902,12 +905,18 @@ def serialize_safe(cls, themap, protocol_version): raise TypeError("Got a non-map object for a map value") inner_proto = max(3, protocol_version) for key, val in items: - keybytes = key_type.to_binary(key, inner_proto) - valbytes = value_type.to_binary(val, inner_proto) - buf.write(pack(len(keybytes))) - buf.write(keybytes) - buf.write(pack(len(valbytes))) - buf.write(valbytes) + if key is not None: + keybytes = key_type.to_binary(key, inner_proto) + buf.write(pack(len(keybytes))) + buf.write(keybytes) + else: + buf.write(pack(-1)) + if val is not None: + valbytes = value_type.to_binary(val, inner_proto) + buf.write(pack(len(valbytes))) + buf.write(valbytes) + else: + buf.write(pack(-1)) return buf.getvalue() diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index bc26a3013e..4329574ba6 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -26,7 +26,7 @@ from cassandra.concurrent import execute_concurrent_with_args from cassandra.cqltypes import Int32Type, EMPTY from cassandra.query import dict_factory, ordered_dict_factory -from cassandra.util import sortedset, Duration +from cassandra.util import sortedset, Duration, OrderedMap from tests.unit.cython.utils import cythontest from tests.integration import use_singledc, execute_until_pass, notprotocolv1, \ @@ -723,6 +723,51 @@ def test_can_insert_tuples_with_nulls(self): self.assertEqual(('', None, None, b''), result[0].t) self.assertEqual(('', None, None, b''), s.execute(read)[0].t) + def test_insert_collection_with_null_fails(self): + """ + NULLs in list / sets / maps are forbidden. + This is a regression test - there was a bug that serialized None values + in collections as empty values instead of nulls. 
+ """ + s = self.session + columns = [] + for collection_type in ['list', 'set']: + for simple_type in PRIMITIVE_DATATYPES_KEYS: + columns.append(f'{collection_type}_{simple_type} {collection_type}<{simple_type}>') + for simple_type in PRIMITIVE_DATATYPES_KEYS: + columns.append(f'map_k_{simple_type} map<{simple_type}, ascii>') + columns.append(f'map_v_{simple_type} map') + s.execute(f'CREATE TABLE collection_nulls (k int PRIMARY KEY, {", ".join(columns)})') + + def raises_simple_and_prepared(exc_type, query_str, args): + self.assertRaises(exc_type, lambda: s.execute(query_str, args)) + p = s.prepare(query_str.replace('%s', '?')) + self.assertRaises(exc_type, lambda: s.execute(p, args)) + + i = 0 + for simple_type in PRIMITIVE_DATATYPES_KEYS: + query_str = f'INSERT INTO collection_nulls (k, set_{simple_type}) VALUES (%s, %s)' + args = [i, sortedset([None, get_sample(simple_type)])] + raises_simple_and_prepared(InvalidRequest, query_str, args) + i += 1 + for simple_type in PRIMITIVE_DATATYPES_KEYS: + query_str = f'INSERT INTO collection_nulls (k, list_{simple_type}) VALUES (%s, %s)' + args = [i, [None, get_sample(simple_type)]] + raises_simple_and_prepared(InvalidRequest, query_str, args) + i += 1 + for simple_type in PRIMITIVE_DATATYPES_KEYS: + query_str = f'INSERT INTO collection_nulls (k, map_k_{simple_type}) VALUES (%s, %s)' + args = [i, OrderedMap([(get_sample(simple_type), 'abc'), (None, 'def')])] + raises_simple_and_prepared(InvalidRequest, query_str, args) + i += 1 + for simple_type in PRIMITIVE_DATATYPES_KEYS: + query_str = f'INSERT INTO collection_nulls (k, map_v_{simple_type}) VALUES (%s, %s)' + args = [i, OrderedMap([('abc', None), ('def', get_sample(simple_type))])] + raises_simple_and_prepared(InvalidRequest, query_str, args) + i += 1 + + + def test_can_insert_unicode_query_string(self): """ Test to ensure unicode strings can be used in a query From d0f472f0a0de88f1bc88f76928ea6bf556081b94 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Fri, 14 Jul 2023 20:02:53 +0200 Subject: [PATCH 171/551] CI: Bump Scylla version to 5.1 5.0 is no longer supported. --- ci/run_integration_test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/run_integration_test.sh b/ci/run_integration_test.sh index 0b34e57772..4bcf4df1e1 100755 --- a/ci/run_integration_test.sh +++ b/ci/run_integration_test.sh @@ -15,7 +15,7 @@ if (( aio_max_nr != aio_max_nr_recommended_value )); then fi fi -SCYLLA_RELEASE='release:5.0' +SCYLLA_RELEASE='release:5.1' python3 -m venv .test-venv source .test-venv/bin/activate From 7c9df8500bf8260e404c6225207a4a5dc97aaab5 Mon Sep 17 00:00:00 2001 From: Anna Stuchlik Date: Wed, 26 Jul 2023 12:58:05 +0200 Subject: [PATCH 172/551] doc: remove "Upgrading from dse-driver" section This commit fixes a bug reported in https://github.com/scylladb/python-driver/issues/244 by removing the incorrect section from the Upgrading page. --- docs/upgrading.rst | 8 -------- 1 file changed, 8 deletions(-) diff --git a/docs/upgrading.rst b/docs/upgrading.rst index 9559fa3579..6161b8c881 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -4,14 +4,6 @@ Upgrading .. toctree:: :maxdepth: 1 -Upgrading from dse-driver -------------------------- - -Since 3.21.0, scylla-driver fully supports DataStax products. dse-driver and -dse-graph users should now migrate to scylla-driver to benefit from latest bug fixes -and new features. The upgrade to this new unified driver version is straightforward -with no major API changes. 
- Installation ^^^^^^^^^^^^ From befd8b9bea45411e579ac2d95bafe6bb8569afcf Mon Sep 17 00:00:00 2001 From: sylwiaszunejko Date: Wed, 16 Aug 2023 12:23:14 +0200 Subject: [PATCH 173/551] Introduce ProtocolFeatures Introduces the ProtocolFeatures class which contains information that affects how the CQL protocol should be serialized and deserialized. Currently, it only supports the Scylla-specific SCYLLA_RATE_LIMIT_ERROR extension. --- cassandra/protocol_features.py | 38 ++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 cassandra/protocol_features.py diff --git a/cassandra/protocol_features.py b/cassandra/protocol_features.py new file mode 100644 index 0000000000..8b73f32fbf --- /dev/null +++ b/cassandra/protocol_features.py @@ -0,0 +1,38 @@ +import logging + +log = logging.getLogger(__name__) + + +RATE_LIMIT_ERROR_EXTENSION = "SCYLLA_RATE_LIMIT_ERROR" + +class ProtocolFeatures(object): + rate_limit_error = None + + def __init__(self, rate_limit_error=None): + self.rate_limit_error = rate_limit_error + + @staticmethod + def parse_from_supported(supported): + return ProtocolFeatures(rate_limit_error = ProtocolFeatures.maybe_parse_rate_limit_error(supported)) + + @staticmethod + def maybe_parse_rate_limit_error(supported): + vals = supported.get(RATE_LIMIT_ERROR_EXTENSION) + if vals is not None: + code_str = ProtocolFeatures.get_cql_extension_field(vals, "ERROR_CODE") + return int(code_str) + + # Looks up a field which starts with `key=` and returns the rest + @staticmethod + def get_cql_extension_field(vals, key): + for v in vals: + stripped_v = v.strip() + if stripped_v.startswith(key) and stripped_v[len(key)] == '=': + result = stripped_v[len(key) + 1:] + return result + return None + + def add_startup_options(self, options): + if self.rate_limit_error is not None: + options[RATE_LIMIT_ERROR_EXTENSION] = "" + From 9b12cc9496d1c416db565723229ff28484bb8993 Mon Sep 17 00:00:00 2001 From: Yaniv Kaul Date: Wed, 23 Aug 2023 18:24:06 +0300 Subject: [PATCH 174/551] Use version agnostic TLS protocol Specifically, PROTOCOL_SSLv23 was changed to PROTOCOL_TLS_CLIENT and in the tests, TLSv1_2_METHOD was changed to TLS_CLIENT_METHOD Fixes: https://github.com/scylladb/python-driver/issues/250 Signed-off-by: Yaniv Kaul --- cassandra/scylla/cloud.py | 4 ++-- tests/integration/long/test_ssl.py | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cassandra/scylla/cloud.py b/cassandra/scylla/cloud.py index 40ef439aaf..3ddce06bf1 100644 --- a/cassandra/scylla/cloud.py +++ b/cassandra/scylla/cloud.py @@ -100,7 +100,7 @@ def get_server(self, data_center): return address, port, node_domain def create_ssl_context(self): - ssl_context = ssl.SSLContext(protocol=ssl.PROTOCOL_SSLv23) + ssl_context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS_CLIENT) ssl_context.verify_mode = ssl.CERT_NONE if self.skip_tls_verify else ssl.CERT_REQUIRED for data_center in self.data_centers.values(): with file_or_memory(path=data_center.get('certificateAuthorityPath'), @@ -124,7 +124,7 @@ def create_pyopenssl_context(self): "PyOpenSSL must be installed to connect to scylla-cloud with the Eventlet or Twisted event loops"), sys.exc_info()[2] ) - ssl_context = SSL.Context(SSL.TLS_METHOD) + ssl_context = SSL.Context(SSL.TLS_CLIENT_METHOD) ssl_context.set_verify(SSL.VERIFY_PEER, callback=lambda _1, _2, _3, _4, ok: True if self.skip_tls_verify else ok) for data_center in self.data_centers.values(): with file_or_memory(path=data_center.get('certificateAuthorityPath'), diff --git 
a/tests/integration/long/test_ssl.py b/tests/integration/long/test_ssl.py index 69285001f8..b9319e15cd 100644 --- a/tests/integration/long/test_ssl.py +++ b/tests/integration/long/test_ssl.py @@ -51,7 +51,7 @@ USES_PYOPENSSL = "twisted" in EVENT_LOOP_MANAGER or "eventlet" in EVENT_LOOP_MANAGER if "twisted" in EVENT_LOOP_MANAGER: import OpenSSL - ssl_version = OpenSSL.SSL.TLSv1_2_METHOD + ssl_version = OpenSSL.SSL.TLS_METHOD verify_certs = {'cert_reqs': SSL.VERIFY_PEER, 'check_hostname': True} else: @@ -401,7 +401,7 @@ def test_can_connect_with_sslcontext_certificate(self): @test_category connection:ssl """ if USES_PYOPENSSL: - ssl_context = SSL.Context(SSL.TLSv1_2_METHOD) + ssl_context = SSL.Context(SSL.TLS_CLIENT_METHOD) ssl_context.load_verify_locations(CLIENT_CA_CERTS) else: ssl_context = ssl.SSLContext(ssl_version) @@ -425,7 +425,7 @@ def test_can_connect_with_ssl_client_auth_password_private_key(self): ssl_options = {} if USES_PYOPENSSL: - ssl_context = SSL.Context(SSL.TLSv1_2_METHOD) + ssl_context = SSL.Context(SSL.TLS_CLIENT_METHOD) ssl_context.use_certificate_file(abs_driver_certfile) with open(abs_driver_keyfile) as keyfile: key = crypto.load_privatekey(crypto.FILETYPE_PEM, keyfile.read(), b'cassandra') @@ -446,7 +446,7 @@ def test_can_connect_with_ssl_context_ca_host_match(self): """ ssl_options = {} if USES_PYOPENSSL: - ssl_context = SSL.Context(SSL.TLSv1_2_METHOD) + ssl_context = SSL.Context(SSL.TLS_CLIENT_METHOD) ssl_context.use_certificate_file(DRIVER_CERTFILE) with open(DRIVER_KEYFILE_ENCRYPTED) as keyfile: key = crypto.load_privatekey(crypto.FILETYPE_PEM, keyfile.read(), b'cassandra') @@ -469,7 +469,7 @@ def test_can_connect_with_ssl_context_ca_host_match(self): def test_cannot_connect_ssl_context_with_invalid_hostname(self): ssl_options = {} if USES_PYOPENSSL: - ssl_context = SSL.Context(SSL.TLSv1_2_METHOD) + ssl_context = SSL.Context(SSL.TLS_CLIENT_METHOD) ssl_context.use_certificate_file(DRIVER_CERTFILE) with open(DRIVER_KEYFILE_ENCRYPTED) as keyfile: key = crypto.load_privatekey(crypto.FILETYPE_PEM, keyfile.read(), b"cassandra") From 3ca24b287ccf33c407c9a5368a957c9b439531ba Mon Sep 17 00:00:00 2001 From: sylwiaszunejko Date: Wed, 16 Aug 2023 12:36:01 +0200 Subject: [PATCH 175/551] Add new error for rate limit Adds RateLimitReached error, which is a Scylla-specific error returned when a per-partition rate limit is exceeded. --- cassandra/__init__.py | 19 +++++++++++++++++++ cassandra/protocol.py | 17 +++++++++++++++-- docs/scylla-specific.rst | 36 ++++++++++++++++++++++++++++++++++++ 3 files changed, 70 insertions(+), 2 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index d5b1944cfd..c8d180d750 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from enum import Enum import logging @@ -728,3 +729,21 @@ class UnresolvableContactPoints(DriverException): contact points, only when lookup fails for all hosts """ pass + + +class OperationType(Enum): + Read = 0 + Write = 1 + +class RateLimitReached(ConfigurationException): + ''' + Rate limit was exceeded for a partition affected by the request. 
+ ''' + op_type = None + rejected_by_coordinator = False + + def __init__(self, op_type=None, rejected_by_coordinator=False): + self.op_type = op_type + self.rejected_by_coordinator = rejected_by_coordinator + message = f"[request_error_rate_limit_reached OpType={op_type.name} RejectedByCoordinator={rejected_by_coordinator}]" + Exception.__init__(self, message) diff --git a/cassandra/protocol.py b/cassandra/protocol.py index ed92a76679..078bcc9d80 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -22,9 +22,9 @@ from six.moves import range import io -from cassandra import ProtocolVersion +from cassandra import OperationType, ProtocolVersion from cassandra import type_codes, DriverException -from cassandra import (Unavailable, WriteTimeout, ReadTimeout, +from cassandra import (Unavailable, WriteTimeout, RateLimitReached, ReadTimeout, WriteFailure, ReadFailure, FunctionFailure, AlreadyExists, InvalidRequest, Unauthorized, UnsupportedOperation, UserFunctionDescriptor, @@ -390,6 +390,19 @@ def recv_error_info(f, protocol_version): def to_exception(self): return AlreadyExists(**self.info) +class RateLimitReachedException(ConfigurationException): + summary= 'Rate limit was exceeded for a partition affected by the request' + error_code = 0x4321 + + @staticmethod + def recv_error_info(f, protocol_version): + return { + 'op_type': OperationType(read_byte(f)), + 'rejected_by_coordinator': read_byte(f) != 0 + } + + def to_exception(self): + return RateLimitReached(**self.info) class ClientWriteError(RequestExecutionException): summary = 'Client write failure.' diff --git a/docs/scylla-specific.rst b/docs/scylla-specific.rst index 101ddb534b..4a7b95b8c9 100644 --- a/docs/scylla-specific.rst +++ b/docs/scylla-specific.rst @@ -104,3 +104,39 @@ New Table Attributes cluster.refresh_table_metadata("keyspace1", "standard1") assert cluster.metadata.keyspaces["keyspace1"].tables["standard1"].options["in_memory"] == True + + +New Error Types +-------------------- + +* ``SCYLLA_RATE_LIMIT_ERROR`` Error + + The ScyllaDB 5.1 introduced a feature called per-partition rate limiting. In case the (user defined) per-partition rate limit is exceeded, the database will start returning a Scylla-specific type of error: RateLimitReached. + +.. code:: python + + from cassandra import RateLimitReached + from cassandra.cluster import Cluster + + cluster = Cluster() + session = cluster.connect() + session.execute(""" + CREATE KEYSPACE IF NOT EXISTS keyspace1 + WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} + """) + + session.execute("USE keyspace1") + session.execute(""" + CREATE TABLE tbl (pk int PRIMARY KEY, v int) + WITH per_partition_rate_limit = {'max_writes_per_second': 1} + """) + + prepared = session.prepare(""" + INSERT INTO tbl (pk, v) VALUES (?, ?) + """) + + try: + for _ in range(1000): + self.session.execute(prepared.bind((123, 456))) + except RateLimitReached: + raise From f36ba79fecb2c1f8bbbe3bdb1e139677ffeb5b57 Mon Sep 17 00:00:00 2001 From: sylwiaszunejko Date: Wed, 16 Aug 2023 12:38:06 +0200 Subject: [PATCH 176/551] Use RateLimitReached error Now, the connection negotiates protocol features and uses them later in decoding. RateLimitReached is used instead of deafault. 
--- cassandra/connection.py | 18 ++++++++++++++---- cassandra/protocol.py | 13 ++++++++----- 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index c3ba42d725..4e477c1e22 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -31,6 +31,8 @@ import random import itertools +from cassandra.protocol_features import ProtocolFeatures + if 'gevent.monkey' in sys.modules: from gevent.queue import Queue, Empty else: @@ -772,6 +774,8 @@ class Connection(object): _on_orphaned_stream_released = None + features = None + @property def _iobuf(self): # backward compatibility, to avoid any change in the reactors @@ -1263,7 +1267,7 @@ def process_msg(self, header, body): return try: - response = decoder(header.version, self.user_type_map, stream_id, + response = decoder(header.version, self.features, self.user_type_map, stream_id, header.flags, header.opcode, body, self.decompressor, result_metadata) except Exception as exc: log.exception("Error decoding response from Cassandra. " @@ -1338,6 +1342,11 @@ def _handle_options_response(self, options_response): remote_supported_compressions = options_response.options['COMPRESSION'] self._product_type = options_response.options.get('PRODUCT_TYPE', [None])[0] + protocol_features = ProtocolFeatures.parse_from_supported(options_response.options) + options = {} + protocol_features.add_startup_options(options) + self.features = protocol_features + if self.cql_version: if self.cql_version not in supported_cql_versions: raise ProtocolError( @@ -1388,13 +1397,14 @@ def _handle_options_response(self, options_response): self._compressor, self.decompressor = \ locally_supported_compressions[compression_type] - self._send_startup_message(compression_type, no_compact=self.no_compact) + self._send_startup_message(compression_type, no_compact=self.no_compact, extra_options=options) @defunct_on_error - def _send_startup_message(self, compression=None, no_compact=False): + def _send_startup_message(self, compression=None, no_compact=False, extra_options=None): log.debug("Sending StartupMessage on %s", self) opts = {'DRIVER_NAME': DRIVER_NAME, - 'DRIVER_VERSION': DRIVER_VERSION} + 'DRIVER_VERSION': DRIVER_VERSION, + **extra_options} if compression: opts['COMPRESSION'] = compression if no_compact: diff --git a/cassandra/protocol.py b/cassandra/protocol.py index 078bcc9d80..b1ab4707db 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -126,10 +126,13 @@ def __init__(self, code, message, info): self.info = info @classmethod - def recv_body(cls, f, protocol_version, *args): + def recv_body(cls, f, protocol_version, protocol_features, *args): code = read_int(f) msg = read_string(f) - subcls = error_classes.get(code, cls) + if code == protocol_features.rate_limit_error: + subcls = RateLimitReachedException + else: + subcls = error_classes.get(code, cls) extra_info = subcls.recv_error_info(f, protocol_version) return subcls(code=code, message=msg, info=extra_info) @@ -751,7 +754,7 @@ def recv(self, f, protocol_version, user_type_map, result_metadata): raise DriverException("Unknown RESULT kind: %d" % self.kind) @classmethod - def recv_body(cls, f, protocol_version, user_type_map, result_metadata): + def recv_body(cls, f, protocol_version, protocol_features, user_type_map, result_metadata): kind = read_int(f) msg = cls(kind) msg.recv(f, protocol_version, user_type_map, result_metadata) @@ -1160,7 +1163,7 @@ def _write_header(f, version, flags, stream_id, opcode, length): write_int(f, length) 
    @classmethod
-    def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcode, body,
+    def decode_message(cls, protocol_version, protocol_features, user_type_map, stream_id, flags, opcode, body,
                        decompressor, result_metadata):
         """
         Decodes a native protocol message body
@@ -1206,7 +1209,7 @@ def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcod
             log.warning("Unknown protocol flags set: %02x. May cause problems.", flags)

         msg_class = cls.message_types_by_opcode[opcode]
-        msg = msg_class.recv_body(body, protocol_version, user_type_map, result_metadata)
+        msg = msg_class.recv_body(body, protocol_version, protocol_features, user_type_map, result_metadata)
         msg.stream_id = stream_id
         msg.trace_id = trace_id
         msg.custom_payload = custom_payload

From ea8afecf032c8bf4292bf7b35831493e270f74a8 Mon Sep 17 00:00:00 2001
From: sylwiaszunejko
Date: Mon, 21 Aug 2023 10:25:59 +0200
Subject: [PATCH 177/551] Move sharding info to ProtocolFeatures

Sharding is a protocol extension, so sharding-related info is now part of
the ProtocolFeatures class. _ShardingInfo.parse_sharding_info is also
moved to ProtocolFeatures, to keep all feature strings in one place.
---
 cassandra/c_shard_info.pyx              | 18 ----------
 cassandra/connection.py                 | 15 +++------
 cassandra/pool.py                       | 44 ++++++++++++-------------
 cassandra/protocol_features.py          | 31 +++++++++++++++++--
 cassandra/shard_info.py                 | 18 ----------
 tests/unit/test_host_connection_pool.py |  9 ++---
 tests/unit/test_shard_aware.py          | 13 ++++----
 7 files changed, 67 insertions(+), 81 deletions(-)

diff --git a/cassandra/c_shard_info.pyx b/cassandra/c_shard_info.pyx
index 39c098ee82..a8affd9bba 100644
--- a/cassandra/c_shard_info.pyx
+++ b/cassandra/c_shard_info.pyx
@@ -36,24 +36,6 @@ cdef class ShardingInfo():
         self.shard_aware_port = int(shard_aware_port) if shard_aware_port else 0
         self.shard_aware_port_ssl = int(shard_aware_port_ssl) if shard_aware_port_ssl else 0

-    @staticmethod
-    def parse_sharding_info(message):
-        shard_id = message.options.get('SCYLLA_SHARD', [''])[0] or None
-        shards_count = message.options.get('SCYLLA_NR_SHARDS', [''])[0] or None
-        partitioner = message.options.get('SCYLLA_PARTITIONER', [''])[0] or None
-        sharding_algorithm = message.options.get('SCYLLA_SHARDING_ALGORITHM', [''])[0] or None
-        sharding_ignore_msb = message.options.get('SCYLLA_SHARDING_IGNORE_MSB', [''])[0] or None
-        shard_aware_port = message.options.get('SCYLLA_SHARD_AWARE_PORT', [''])[0] or None
-        shard_aware_port_ssl = message.options.get('SCYLLA_SHARD_AWARE_PORT_SSL', [''])[0] or None
-
-        if not (shard_id or shards_count or partitioner == "org.apache.cassandra.dht.Murmur3Partitioner" or
-                sharding_algorithm == "biased-token-round-robin" or sharding_ignore_msb):
-            return 0, None
-
-        return int(shard_id), ShardingInfo(shard_id, shards_count, partitioner, sharding_algorithm, sharding_ignore_msb,
-                                           shard_aware_port, shard_aware_port_ssl)
-
-
     def shard_id_from_token(self, int64_t token_input):
         cdef uint64_t biased_token = token_input + (1 << 63);
         biased_token <<= self.sharding_ignore_msb;
diff --git a/cassandra/connection.py b/cassandra/connection.py
index 4e477c1e22..295066694b 100644
--- a/cassandra/connection.py
+++ b/cassandra/connection.py
@@ -767,9 +767,6 @@ class Connection(object):

     _owning_pool = None

-    shard_id = 0
-    sharding_info = None
-
     _is_checksumming_enabled = False

     _on_orphaned_stream_released = None

@@ -835,7 +832,7 @@ def __init__(self, host='127.0.0.1', port=9042, authenticator=None,
         self.lock = RLock()
         self.connected_event = Event()
-
self.shard_id = shard_id + self.features = ProtocolFeatures(shard_id=shard_id) self.total_shards = total_shards self.original_endpoint = self.endpoint @@ -900,8 +897,8 @@ def _wrap_socket_from_context(self): self._socket = self.ssl_context.wrap_socket(self._socket, **ssl_options) def _initiate_connection(self, sockaddr): - if self.shard_id is not None: - for port in ShardawarePortGenerator.generate(self.shard_id, self.total_shards): + if self.features.shard_id is not None: + for port in ShardawarePortGenerator.generate(self.features.shard_id, self.total_shards): try: self._socket.bind(('', port)) break @@ -1322,7 +1319,7 @@ def _send_options_message(self): @defunct_on_error def _handle_options_response(self, options_response): - self.shard_id, self.sharding_info = ShardingInfo.parse_sharding_info(options_response) + self.features = ProtocolFeatures.parse_from_supported(options_response.options) if self.is_defunct: return @@ -1342,10 +1339,8 @@ def _handle_options_response(self, options_response): remote_supported_compressions = options_response.options['COMPRESSION'] self._product_type = options_response.options.get('PRODUCT_TYPE', [None])[0] - protocol_features = ProtocolFeatures.parse_from_supported(options_response.options) options = {} - protocol_features.add_startup_options(options) - self.features = protocol_features + self.features.add_startup_options(options) if self.cql_version: if self.cql_version not in supported_cql_versions: diff --git a/cassandra/pool.py b/cassandra/pool.py index 50c291d548..110b682c72 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -427,15 +427,15 @@ def __init__(self, host, host_distance, session): log.debug("Initializing connection for host %s", self.host) first_connection = session.cluster.connection_factory(self.host.endpoint, on_orphaned_stream_released=self.on_orphaned_stream_released) - log.debug("First connection created to %s for shard_id=%i", self.host, first_connection.shard_id) - self._connections[first_connection.shard_id] = first_connection + log.debug("First connection created to %s for shard_id=%i", self.host, first_connection.features.shard_id) + self._connections[first_connection.features.shard_id] = first_connection self._keyspace = session.keyspace if self._keyspace: first_connection.set_keyspace_blocking(self._keyspace) - if first_connection.sharding_info and not self._session.cluster.shard_aware_options.disable: - self.host.sharding_info = first_connection.sharding_info - self._open_connections_for_all_shards(first_connection.shard_id) + if first_connection.features.sharding_info and not self._session.cluster.shard_aware_options.disable: + self.host.sharding_info = first_connection.features.sharding_info + self._open_connections_for_all_shards(first_connection.features.shard_id) log.debug("Finished initializing connection for host %s", self.host) @@ -556,7 +556,7 @@ def return_connection(self, connection, stream_was_orphaned=False): with self._lock: if self.is_shutdown: return - self._connections.pop(connection.shard_id, None) + self._connections.pop(connection.features.shard_id, None) if self._is_replacing: return self._is_replacing = True @@ -587,17 +587,17 @@ def _replace(self, connection): log.debug("Replacing connection (%s) to %s", id(connection), self.host) try: - if connection.shard_id in self._connections.keys(): - del self._connections[connection.shard_id] + if connection.features.shard_id in self._connections.keys(): + del self._connections[connection.features.shard_id] if self.host.sharding_info and not 
self._session.cluster.shard_aware_options.disable: - self._connecting.add(connection.shard_id) - self._session.submit(self._open_connection_to_missing_shard, connection.shard_id) + self._connecting.add(connection.features.shard_id) + self._session.submit(self._open_connection_to_missing_shard, connection.features.shard_id) else: connection = self._session.cluster.connection_factory(self.host.endpoint, on_orphaned_stream_released=self.on_orphaned_stream_released) if self._keyspace: connection.set_keyspace_blocking(self._keyspace) - self._connections[connection.shard_id] = connection + self._connections[connection.features.shard_id] = connection except Exception: log.warning("Failed reconnecting %s. Retrying." % (self.host.endpoint,)) self._session.submit(self._replace, connection) @@ -703,23 +703,23 @@ def _open_connection_to_missing_shard(self, shard_id): else: conn = self._session.cluster.connection_factory(self.host.endpoint, on_orphaned_stream_released=self.on_orphaned_stream_released) - log.debug("Received a connection %s for shard_id=%i on host %s", id(conn), conn.shard_id, self.host) + log.debug("Received a connection %s for shard_id=%i on host %s", id(conn), conn.features.shard_id, self.host) if self.is_shutdown: log.debug("Pool for host %s is in shutdown, closing the new connection (%s)", self.host, id(conn)) conn.close() return - if shard_aware_endpoint and shard_id != conn.shard_id: + if shard_aware_endpoint and shard_id != conn.features.shard_id: # connection didn't land on expected shared # assuming behind a NAT, disabling advanced shard aware for a while self.disable_advanced_shard_aware(10 * 60) - old_conn = self._connections.get(conn.shard_id) + old_conn = self._connections.get(conn.features.shard_id) if old_conn is None or old_conn.orphaned_threshold_reached: log.debug( "New connection (%s) created to shard_id=%i on host %s", id(conn), - conn.shard_id, + conn.features.shard_id, self.host ) old_conn = None @@ -727,27 +727,27 @@ def _open_connection_to_missing_shard(self, shard_id): if self.is_shutdown: conn.close() return - if conn.shard_id in self._connections.keys(): + if conn.features.shard_id in self._connections.keys(): # Move the current connection to the trash and use the new one from now on - old_conn = self._connections[conn.shard_id] + old_conn = self._connections[conn.features.shard_id] log.debug( "Replacing overloaded connection (%s) with (%s) for shard %i for host %s", id(old_conn), id(conn), - conn.shard_id, + conn.features.shard_id, self.host ) if self._keyspace: conn.set_keyspace_blocking(self._keyspace) - self._connections[conn.shard_id] = conn + self._connections[conn.features.shard_id] = conn if old_conn is not None: remaining = old_conn.in_flight - len(old_conn.orphaned_request_ids) if remaining == 0: log.debug( "Immediately closing the old connection (%s) for shard %i on host %s", id(old_conn), - old_conn.shard_id, + old_conn.features.shard_id, self.host ) old_conn.close() @@ -755,7 +755,7 @@ def _open_connection_to_missing_shard(self, shard_id): log.debug( "Moving the connection (%s) for shard %i to trash on host %s, %i requests remaining", id(old_conn), - old_conn.shard_id, + old_conn.features.shard_id, self.host, remaining, ) @@ -800,7 +800,7 @@ def _open_connection_to_missing_shard(self, shard_id): log.debug( "Putting a connection %s to shard %i to the excess pool of host %s", id(conn), - conn.shard_id, + conn.features.shard_id, self.host ) close_connection = False diff --git a/cassandra/protocol_features.py b/cassandra/protocol_features.py index 
8b73f32fbf..fc7c5b060e 100644 --- a/cassandra/protocol_features.py +++ b/cassandra/protocol_features.py @@ -1,5 +1,7 @@ import logging +from cassandra.shard_info import _ShardingInfo + log = logging.getLogger(__name__) @@ -7,13 +9,19 @@ class ProtocolFeatures(object): rate_limit_error = None + shard_id = 0 + sharding_info = None - def __init__(self, rate_limit_error=None): + def __init__(self, rate_limit_error=None, shard_id=0, sharding_info=None): self.rate_limit_error = rate_limit_error + self.shard_id = shard_id + self.sharding_info = sharding_info @staticmethod def parse_from_supported(supported): - return ProtocolFeatures(rate_limit_error = ProtocolFeatures.maybe_parse_rate_limit_error(supported)) + rate_limit_error = ProtocolFeatures.maybe_parse_rate_limit_error(supported) + shard_id, sharding_info = ProtocolFeatures.parse_sharding_info(supported) + return ProtocolFeatures(rate_limit_error, shard_id, sharding_info) @staticmethod def maybe_parse_rate_limit_error(supported): @@ -36,3 +44,22 @@ def add_startup_options(self, options): if self.rate_limit_error is not None: options[RATE_LIMIT_ERROR_EXTENSION] = "" + @staticmethod + def parse_sharding_info(options): + shard_id = options.get('SCYLLA_SHARD', [''])[0] or None + shards_count = options.get('SCYLLA_NR_SHARDS', [''])[0] or None + partitioner = options.get('SCYLLA_PARTITIONER', [''])[0] or None + sharding_algorithm = options.get('SCYLLA_SHARDING_ALGORITHM', [''])[0] or None + sharding_ignore_msb = options.get('SCYLLA_SHARDING_IGNORE_MSB', [''])[0] or None + shard_aware_port = options.get('SCYLLA_SHARD_AWARE_PORT', [''])[0] or None + shard_aware_port_ssl = options.get('SCYLLA_SHARD_AWARE_PORT_SSL', [''])[0] or None + log.debug("Parsing sharding info from message options %s", options) + + if not (shard_id or shards_count or partitioner == "org.apache.cassandra.dht.Murmur3Partitioner" or + sharding_algorithm == "biased-token-round-robin" or sharding_ignore_msb): + return 0, None + + return int(shard_id), _ShardingInfo(shard_id, shards_count, partitioner, sharding_algorithm, sharding_ignore_msb, + shard_aware_port, shard_aware_port_ssl) + + diff --git a/cassandra/shard_info.py b/cassandra/shard_info.py index a37b8467b5..8f62252193 100644 --- a/cassandra/shard_info.py +++ b/cassandra/shard_info.py @@ -28,24 +28,6 @@ def __init__(self, shard_id, shards_count, partitioner, sharding_algorithm, shar self.shard_aware_port = int(shard_aware_port) if shard_aware_port else None self.shard_aware_port_ssl = int(shard_aware_port_ssl) if shard_aware_port_ssl else None - @staticmethod - def parse_sharding_info(message): - shard_id = message.options.get('SCYLLA_SHARD', [''])[0] or None - shards_count = message.options.get('SCYLLA_NR_SHARDS', [''])[0] or None - partitioner = message.options.get('SCYLLA_PARTITIONER', [''])[0] or None - sharding_algorithm = message.options.get('SCYLLA_SHARDING_ALGORITHM', [''])[0] or None - sharding_ignore_msb = message.options.get('SCYLLA_SHARDING_IGNORE_MSB', [''])[0] or None - shard_aware_port = message.options.get('SCYLLA_SHARD_AWARE_PORT', [''])[0] or None - shard_aware_port_ssl = message.options.get('SCYLLA_SHARD_AWARE_PORT_SSL', [''])[0] or None - log.debug("Parsing sharding info from message options %s", message.options) - - if not (shard_id or shards_count or partitioner == "org.apache.cassandra.dht.Murmur3Partitioner" or - sharding_algorithm == "biased-token-round-robin" or sharding_ignore_msb): - return 0, None - - return int(shard_id), _ShardingInfo(shard_id, shards_count, partitioner, sharding_algorithm, 
sharding_ignore_msb, - shard_aware_port, shard_aware_port_ssl) - def shard_id_from_token(self, token): """ Convert a Murmur3 token to shard_id based on the number of shards on the host diff --git a/tests/unit/test_host_connection_pool.py b/tests/unit/test_host_connection_pool.py index 40f770f00c..efed55daa2 100644 --- a/tests/unit/test_host_connection_pool.py +++ b/tests/unit/test_host_connection_pool.py @@ -14,6 +14,7 @@ from concurrent.futures import ThreadPoolExecutor import logging import time +from cassandra.protocol_features import ProtocolFeatures from cassandra.shard_info import _ShardingInfo @@ -300,11 +301,11 @@ def mock_connection_factory(self, *args, **kwargs): connection.is_shutdown = False connection.is_defunct = False connection.is_closed = False - connection.shard_id = self.connection_counter + connection.features = ProtocolFeatures(shard_id=self.connection_counter, + sharding_info=_ShardingInfo(shard_id=1, shards_count=14, + partitioner="", sharding_algorithm="", sharding_ignore_msb=0, + shard_aware_port="", shard_aware_port_ssl="")) self.connection_counter += 1 - connection.sharding_info = _ShardingInfo(shard_id=1, shards_count=14, - partitioner="", sharding_algorithm="", sharding_ignore_msb=0, - shard_aware_port="", shard_aware_port_ssl="") return connection diff --git a/tests/unit/test_shard_aware.py b/tests/unit/test_shard_aware.py index dfe66eff8e..fe7b95edba 100644 --- a/tests/unit/test_shard_aware.py +++ b/tests/unit/test_shard_aware.py @@ -25,6 +25,7 @@ from cassandra.pool import HostConnection, HostDistance from cassandra.connection import ShardingInfo, DefaultEndPoint from cassandra.metadata import Murmur3Token +from cassandra.protocol_features import ProtocolFeatures LOGGER = logging.getLogger(__name__) @@ -43,7 +44,7 @@ class OptionsHolder(object): 'SCYLLA_SHARDING_ALGORITHM': ['biased-token-round-robin'], 'SCYLLA_SHARDING_IGNORE_MSB': ['12'] } - shard_id, shard_info = ShardingInfo.parse_sharding_info(OptionsHolder()) + shard_id, shard_info = ProtocolFeatures.parse_sharding_info(OptionsHolder().options) self.assertEqual(shard_id, 1) self.assertEqual(shard_info.shard_id_from_token(Murmur3Token.from_key(b"a").value), 4) @@ -88,12 +89,10 @@ def mock_connection_factory(self, *args, **kwargs): connection.is_defunct = False connection.is_closed = False connection.orphaned_threshold_reached = False - connection.endpoint = args[0] - connection.shard_id = kwargs.get('shard_id', self.connection_counter) + connection.endpoint = args[0] + sharding_info = ShardingInfo(shard_id=1, shards_count=4, partitioner="", sharding_algorithm="", sharding_ignore_msb=0, shard_aware_port=19042, shard_aware_port_ssl=19045) + connection.features = ProtocolFeatures(shard_id=kwargs.get('shard_id', self.connection_counter), sharding_info=sharding_info) self.connection_counter += 1 - connection.sharding_info = ShardingInfo(shard_id=1, shards_count=4, - partitioner="", sharding_algorithm="", sharding_ignore_msb=0, - shard_aware_port=19042, shard_aware_port_ssl=19045) return connection @@ -107,7 +106,7 @@ def mock_connection_factory(self, *args, **kwargs): f.result() assert len(pool._connections) == 4 for shard_id, connection in pool._connections.items(): - assert connection.shard_id == shard_id + assert connection.features.shard_id == shard_id if shard_id == 0: assert connection.endpoint == DefaultEndPoint("1.2.3.4") else: From 73b86ec96a0b9ab488316963d141f00be54e19ad Mon Sep 17 00:00:00 2001 From: sylwiaszunejko Date: Wed, 16 Aug 2023 12:34:00 +0200 Subject: [PATCH 178/551] Add test for rate 
limit exceeded --- .../standard/test_rate_limit_exceeded.py | 59 +++++++++++++++++++ tests/unit/test_protocol_features.py | 27 +++++++++ 2 files changed, 86 insertions(+) create mode 100644 tests/integration/standard/test_rate_limit_exceeded.py create mode 100644 tests/unit/test_protocol_features.py diff --git a/tests/integration/standard/test_rate_limit_exceeded.py b/tests/integration/standard/test_rate_limit_exceeded.py new file mode 100644 index 0000000000..280d6426e1 --- /dev/null +++ b/tests/integration/standard/test_rate_limit_exceeded.py @@ -0,0 +1,59 @@ +import logging +import unittest +from cassandra import OperationType, RateLimitReached +from cassandra.cluster import Cluster +from cassandra.policies import ConstantReconnectionPolicy, RoundRobinPolicy, TokenAwarePolicy + +from tests.integration import PROTOCOL_VERSION, use_cluster + +LOGGER = logging.getLogger(__name__) + +def setup_module(): + use_cluster('rate_limit', [3], start=True) + +class TestRateLimitExceededException(unittest.TestCase): + @classmethod + def setup_class(cls): + cls.cluster = Cluster(contact_points=["127.0.0.1"], protocol_version=PROTOCOL_VERSION, + load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), + reconnection_policy=ConstantReconnectionPolicy(1)) + cls.session = cls.cluster.connect() + + @classmethod + def teardown_class(cls): + cls.cluster.shutdown() + + def test_rate_limit_exceeded(self): + self.session.execute( + """ + DROP KEYSPACE IF EXISTS ratetests + """ + ) + self.session.execute( + """ + CREATE KEYSPACE IF NOT EXISTS ratetests + WITH REPLICATION = {'class' : 'SimpleStrategy', 'replication_factor' : 1} + """) + + self.session.execute("USE ratetests") + self.session.execute( + """ + CREATE TABLE tbl (pk int PRIMARY KEY, v int) + WITH per_partition_rate_limit = {'max_writes_per_second': 1} + """) + + prepared = self.session.prepare( + """ + INSERT INTO tbl (pk, v) VALUES (?, ?) 
+ """) + + # The rate limit is 1 write/s, so repeat the same query + # until an error occurs, it should happen quickly + def execute_write(): + for _ in range(1000): + self.session.execute(prepared.bind((123, 456))) + + with self.assertRaises(RateLimitReached) as context: + execute_write() + + self.assertEqual(context.exception.op_type, OperationType.Write) diff --git a/tests/unit/test_protocol_features.py b/tests/unit/test_protocol_features.py new file mode 100644 index 0000000000..bcf874f68f --- /dev/null +++ b/tests/unit/test_protocol_features.py @@ -0,0 +1,27 @@ +try: + import unittest2 as unittest +except ImportError: + import unittest # noqa + +import logging + +from cassandra.protocol_features import ProtocolFeatures + +LOGGER = logging.getLogger(__name__) + + +class TestProtocolFeatures(unittest.TestCase): + def test_parsing_rate_limit_error(self): + """ + Testing the parsing of the options command + """ + class OptionsHolder(object): + options = { + 'SCYLLA_RATE_LIMIT_ERROR': ["ERROR_CODE=123"] + } + + protocol_features = ProtocolFeatures.parse_from_supported(OptionsHolder().options) + + self.assertEqual(protocol_features.rate_limit_error, 123) + self.assertEqual(protocol_features.shard_id, 0) + self.assertEqual(protocol_features.sharding_info, None) From 67d8b94f9bf09a6590b7a95bd5031b63ee50ce5d Mon Sep 17 00:00:00 2001 From: sylwiaszunejko Date: Wed, 30 Aug 2023 10:10:23 +0200 Subject: [PATCH 179/551] Remove unsupported flag and fix formatting --- docs/scylla-specific.rst | 31 ------------------------------- docs/upgrading.rst | 6 +++--- 2 files changed, 3 insertions(+), 34 deletions(-) diff --git a/docs/scylla-specific.rst b/docs/scylla-specific.rst index 4a7b95b8c9..f830235088 100644 --- a/docs/scylla-specific.rst +++ b/docs/scylla-specific.rst @@ -75,37 +75,6 @@ New Cluster Helpers print("successfully connected to all shards of all scylla nodes") -New Table Attributes --------------------- - -* ``in_memory`` flag - - New flag available on ``TableMetadata.options`` to indicate that it is an `In Memory `_ table - -.. note:: in memory tables is a feature existing only in Scylla Enterprise - -.. code:: python - - from cassandra.cluster import Cluster - - cluster = Cluster() - session = cluster.connect() - session.execute(""" - CREATE KEYSPACE IF NOT EXISTS keyspace1 - WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}; - """) - - session.execute(""" - CREATE TABLE IF NOT EXISTS keyspace1.standard1 ( - key blob PRIMARY KEY, - "C0" blob - ) WITH in_memory=true AND compaction={'class': 'InMemoryCompactionStrategy'} - """) - - cluster.refresh_table_metadata("keyspace1", "standard1") - assert cluster.metadata.keyspaces["keyspace1"].tables["standard1"].options["in_memory"] == True - - New Error Types -------------------- diff --git a/docs/upgrading.rst b/docs/upgrading.rst index 6161b8c881..bc963e6722 100644 --- a/docs/upgrading.rst +++ b/docs/upgrading.rst @@ -91,7 +91,7 @@ DC-aware load balancing policy and to match other drivers. Execution API Updates ^^^^^^^^^^^^^^^^^^^^^ Result return normalization -~~~~~~~~~~~~~~~~~~~~~~~~~~~ +--------------------------- `PYTHON-368 `_ Previously results would be returned as a ``list`` of rows for result rows @@ -129,7 +129,7 @@ This can send requests and load (possibly large) results into memory, so `~.ResultSet` will log a warning on implicit materialization. 
Trace information is not attached to executed Statements -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +-------------------------------------------------------- `PYTHON-318 `_ Previously trace data was attached to Statements if tracing was enabled. This @@ -147,7 +147,7 @@ returned for each query: :meth:`.ResultSet.get_all_query_traces()` Binding named parameters now ignores extra names -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------------------------ `PYTHON-178 `_ Previously, :meth:`.BoundStatement.bind()` would raise if a mapping From d735957e3a7b7178f4fff26f6dbae588e58cf314 Mon Sep 17 00:00:00 2001 From: sylwiaszunejko Date: Tue, 29 Aug 2023 07:26:40 +0200 Subject: [PATCH 180/551] Reresolve DNS as fallback when all hosts are unreachable If all nodes in the cluster change their IPs at one time, the driver used to be unable to ever contact the cluster again; the only solution was to restart the driver. A fallback is added to the control connection logic so that when no known host is reachable, Cluster once again resolves all the known hostnames and ControlConnection tries to connect to them. --- cassandra/cluster.py | 116 +++++++++++++++++++++++++------------------ 1 file changed, 69 insertions(+), 47 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 31ecd15b6f..b230443d7e 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1220,30 +1220,7 @@ def __init__(self, self.endpoint_factory = endpoint_factory or DefaultEndPointFactory(port=self.port) self.endpoint_factory.configure(self) - raw_contact_points = [] - for cp in [cp for cp in self.contact_points if not isinstance(cp, EndPoint)]: - raw_contact_points.append(cp if isinstance(cp, tuple) else (cp, port)) - - self.endpoints_resolved = [cp for cp in self.contact_points if isinstance(cp, EndPoint)] - self._endpoint_map_for_insights = {repr(ep): '{ip}:{port}'.format(ip=ep.address, port=ep.port) - for ep in self.endpoints_resolved} - - strs_resolved_map = _resolve_contact_points_to_string_map(raw_contact_points) - self.endpoints_resolved.extend(list(chain( - *[ - [DefaultEndPoint(ip, port) for ip, port in xs if ip is not None] - for xs in strs_resolved_map.values() if xs is not None - ] - ))) - - self._endpoint_map_for_insights.update( - {key: ['{ip}:{port}'.format(ip=ip, port=port) for ip, port in value] - for key, value in strs_resolved_map.items() if value is not None} - ) - - if contact_points and (not self.endpoints_resolved): - # only want to raise here if the user specified CPs but resolution failed - raise UnresolvableContactPoints(self._endpoint_map_for_insights) + self._resolve_hostnames() self.compression = compression @@ -1427,6 +1404,31 @@ def __init__(self, if application_version is not None: self.application_version = application_version + def _resolve_hostnames(self): + raw_contact_points = [] + for cp in [cp for cp in self.contact_points if not isinstance(cp, EndPoint)]: + raw_contact_points.append(cp if isinstance(cp, tuple) else (cp, self.port)) + + self.endpoints_resolved = [cp for cp in self.contact_points if isinstance(cp, EndPoint)] + self._endpoint_map_for_insights = {repr(ep): '{ip}:{port}'.format(ip=ep.address, port=ep.port) - for ep in self.endpoints_resolved} + strs_resolved_map = _resolve_contact_points_to_string_map(raw_contact_points) + self.endpoints_resolved.extend(list(chain( + *[ + [DefaultEndPoint(ip, port) for ip, port in xs if ip is not None] + for xs in strs_resolved_map.values() if xs is not None + ] + ))) + +
self._endpoint_map_for_insights.update( + {key: ['{ip}:{port}'.format(ip=ip, port=port) for ip, port in value] + for key, value in strs_resolved_map.items() if value is not None} + ) + + if self.contact_points and (not self.endpoints_resolved): + # only want to raise here if the user specified CPs but resolution failed + raise UnresolvableContactPoints(self._endpoint_map_for_insights) + def _create_thread_pool_executor(self, **kwargs): """ Create a ThreadPoolExecutor for the cluster. In most cases, the built-in @@ -1720,6 +1722,20 @@ def protocol_downgrade(self, host_endpoint, previous_version): "http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Cluster.protocol_version", self.protocol_version, new_version, host_endpoint) self.protocol_version = new_version + def _add_resolved_hosts(self): + for endpoint in self.endpoints_resolved: + host, new = self.add_host(endpoint, signal=False) + if new: + host.set_up() + for listener in self.listeners: + listener.on_add(host) + + self.profile_manager.populate( + weakref.proxy(self), self.metadata.all_hosts()) + self.load_balancing_policy.populate( + weakref.proxy(self), self.metadata.all_hosts() + ) + def connect(self, keyspace=None, wait_for_all_pools=False): """ Creates and returns a new :class:`~.Session` object. @@ -1740,18 +1756,8 @@ def connect(self, keyspace=None, wait_for_all_pools=False): self.contact_points, self.protocol_version) self.connection_class.initialize_reactor() _register_cluster_shutdown(self) - for endpoint in self.endpoints_resolved: - host, new = self.add_host(endpoint, signal=False) - if new: - host.set_up() - for listener in self.listeners: - listener.on_add(host) - - self.profile_manager.populate( - weakref.proxy(self), self.metadata.all_hosts()) - self.load_balancing_policy.populate( - weakref.proxy(self), self.metadata.all_hosts() - ) + + self._add_resolved_hosts() try: self.control_connection.connect() @@ -3585,16 +3591,8 @@ def _set_new_connection(self, conn): if old: log.debug("[control connection] Closing old connection %r, replacing with %r", old, conn) old.close() - - def _reconnect_internal(self): - """ - Tries to connect to each host in the query plan until one succeeds - or every attempt fails. If successful, a new Connection will be - returned. Otherwise, :exc:`NoHostAvailable` will be raised - with an "errors" arg that is a dict mapping host addresses - to the exception that was raised when an attempt was made to open - a connection to that host. - """ + + def _connect_host_in_lbp(self): errors = {} lbp = ( self._cluster.load_balancing_policy @@ -3604,7 +3602,7 @@ def _reconnect_internal(self): for host in lbp.make_query_plan(): try: - return self._try_connect(host) + return (self._try_connect(host), None) except ConnectionException as exc: errors[str(host.endpoint)] = exc log.warning("[control connection] Error connecting to %s:", host, exc_info=True) @@ -3614,7 +3612,31 @@ def _reconnect_internal(self): log.warning("[control connection] Error connecting to %s:", host, exc_info=True) if self._is_shutdown: raise DriverException("[control connection] Reconnection in progress during shutdown") + + return (None, errors) + def _reconnect_internal(self): + """ + Tries to connect to each host in the query plan until one succeeds + or every attempt fails. If successful, a new Connection will be + returned. 
Otherwise, :exc:`NoHostAvailable` will be raised + with an "errors" arg that is a dict mapping host addresses + to the exception that was raised when an attempt was made to open + a connection to that host. + """ + (conn, _) = self._connect_host_in_lbp() + if conn is not None: + return conn + + # Try to re-resolve hostnames as a fallback when all hosts are unreachable + self._cluster._resolve_hostnames() + + self._cluster._add_resolved_hosts() + + (conn, errors) = self._connect_host_in_lbp() + if conn is not None: + return conn + raise NoHostAvailable("Unable to connect to any servers", errors) def _try_connect(self, host): From 83d7394ffb7d84bab16f60ae095d6827df5349a4 Mon Sep 17 00:00:00 2001 From: Yaniv Kaul Date: Thu, 17 Aug 2023 12:34:15 +0300 Subject: [PATCH 181/551] Connection to a ScyllaDB cluster is delayed as the driver tries to query the system.peers_v2 table The logic is now that if there is sharding information available, it's a Scylla cluster, so do NOT try to use that table. Fixes: #245 Signed-off-by: Yaniv Kaul --- cassandra/cluster.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index b230443d7e..4bc1e2931a 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3668,6 +3668,11 @@ def _try_connect(self, host): "registering watchers and refreshing schema and topology", connection) + # Indirect way to determine if connected to a ScyllaDB cluster, which does not support peers_v2 + # If sharding information is available, it's a ScyllaDB cluster, so do not use peers_v2 table. + if connection.features.sharding_info is not None: + self._uses_peers_v2 = False + # use weak references in both directions # _clear_watcher will be called when this ControlConnection is about to be finalized # _watch_callback will get the actual callback from the Connection and relay it to From 7b287a81c6eefd0819fc2cd8594d3167e1e7d7ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Tue, 26 Sep 2023 21:03:20 +0200 Subject: [PATCH 182/551] Fix wait_for_schema_agreement deadlock Fixes https://github.com/scylladb/python-driver/issues/168 The fix works by extracting the part of on_down that marks the host as down out of the executor, so it does not need to wait for a free thread. When the host is marked as down, wait_for_schema_agreement can finish, which in turn enables the rest of on_down (the part that still runs on the executor) to be executed. --- cassandra/cluster.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 31ecd15b6f..4476bbb0e3 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2003,6 +2003,17 @@ def _start_reconnector(self, host, is_host_addition): reconnector.start() @run_in_executor + def on_down_potentially_blocking(self, host, is_host_addition): + self.profile_manager.on_down(host) + self.control_connection.on_down(host) + for session in tuple(self.sessions): + session.on_down(host) + + for listener in self.listeners: + listener.on_down(host) + + self._start_reconnector(host, is_host_addition) + def on_down(self, host, is_host_addition, expect_host_to_be_down=False): """ Intended for internal use only.
@@ -2028,18 +2039,9 @@ def on_down(self, host, is_host_addition, expect_host_to_be_down=False): host.set_down() if (not was_up and not expect_host_to_be_down) or host.is_currently_reconnecting(): return - log.warning("Host %s has been marked down", host) - self.profile_manager.on_down(host) - self.control_connection.on_down(host) - for session in tuple(self.sessions): - session.on_down(host) - - for listener in self.listeners: - listener.on_down(host) - - self._start_reconnector(host, is_host_addition) + self.on_down_potentially_blocking(host, is_host_addition) def on_add(self, host, refresh_nodes=True): if self.is_shutdown: From 01383bc7f1e725ae0a087616cb3cdf0e6c69004d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Wed, 27 Sep 2023 14:29:29 +0200 Subject: [PATCH 183/551] Add regression test for schema deadlock Regression test for deadlock when performing schema change right after killing a node: https://github.com/scylladb/python-driver/issues/168 --- ..._concurrent_schema_change_and_node_kill.py | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 tests/integration/standard/test_concurrent_schema_change_and_node_kill.py diff --git a/tests/integration/standard/test_concurrent_schema_change_and_node_kill.py b/tests/integration/standard/test_concurrent_schema_change_and_node_kill.py new file mode 100644 index 0000000000..aeda381c0d --- /dev/null +++ b/tests/integration/standard/test_concurrent_schema_change_and_node_kill.py @@ -0,0 +1,36 @@ +import os +import logging +import unittest + +from tests.integration import use_cluster, get_node, local, TestCluster + +LOGGER = logging.getLogger(__name__) + + +def setup_module(): + use_cluster('test_concurrent_schema_change_and_node_kill', [3], start=True) + +@local +class TestConcurrentSchemaChangeAndNodeKill(unittest.TestCase): + @classmethod + def setup_class(cls): + cls.cluster = TestCluster(max_schema_agreement_wait=120) + cls.session = cls.cluster.connect() + + @classmethod + def teardown_class(cls): + cls.cluster.shutdown() + + def test_schema_change_after_node_kill(self): + node2 = get_node(2) + self.session.execute( + "DROP KEYSPACE IF EXISTS ks_deadlock;") + self.session.execute( + "CREATE KEYSPACE IF NOT EXISTS ks_deadlock " + "WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '2' };") + self.session.set_keyspace('ks_deadlock') + self.session.execute("CREATE TABLE IF NOT EXISTS some_table(k int, c int, v int, PRIMARY KEY (k, v));") + self.session.execute("INSERT INTO some_table (k, c, v) VALUES (1, 2, 3);") + node2.stop(wait=False, gently=False) + self.session.execute("ALTER TABLE some_table ADD v2 int;", timeout=180) + print(self.session.execute("SELECT * FROM some_table WHERE k = 1;").all()) From 11b3ac1a2f3456a0d6ef74cdcabad10e62237b68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Wed, 27 Sep 2023 18:19:47 +0200 Subject: [PATCH 184/551] Release 3.26.3 --- cassandra/__init__.py | 2 +- docs/conf.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index c8d180d750..318627cfe1 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 26, 2) +__version_info__ = (3, 26, 3) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/conf.py b/docs/conf.py index ec6d2b2dd0..431a0c14d9 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -11,10 +11,10 @@ # -- 
General configuration ----------------------------------------------------- # Build documentation for the following tags and branches -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.2-scylla'] +TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.3-scylla'] BRANCHES = ['master'] # Set the latest version. -LATEST_VERSION = '3.26.2-scylla' +LATEST_VERSION = '3.26.3-scylla' # Set which versions are not released yet. UNSTABLE_VERSIONS = ['master'] # Set which versions are deprecated From 64e7fad42ec88dfc72c7f12c389c3ef6c3f392fb Mon Sep 17 00:00:00 2001 From: Alexey Kartashov Date: Thu, 19 Oct 2023 18:25:54 +0200 Subject: [PATCH 185/551] tests: Disable strict_is_not_null_in_views for scylla clusters This change allows `test_metadata_with_quoted_identifiers` to run, as it tries to create materialized view with IS NOT NULL restriction on values --- tests/integration/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index cc85289881..e728bc7740 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -612,6 +612,10 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, # Selecting only features we need for tests, i.e. anything but CDC. CCM_CLUSTER = CCMScyllaCluster(path, cluster_name, **ccm_options) CCM_CLUSTER.set_configuration_options({'experimental_features': ['lwt', 'udf'], 'start_native_transport': True}) + + # Permit IS NOT NULL restriction on non-primary key columns of a materialized view + # This allows `test_metadata_with_quoted_identifiers` to run + CCM_CLUSTER.set_configuration_options({'strict_is_not_null_in_views': False}) else: CCM_CLUSTER = CCMCluster(path, cluster_name, **ccm_options) CCM_CLUSTER.set_configuration_options({'start_native_transport': True}) From 9cb1004b8aeb4297b2445ba725c2acbcba378b26 Mon Sep 17 00:00:00 2001 From: David Garcia Date: Tue, 10 Oct 2023 15:41:37 +0100 Subject: [PATCH 186/551] docs: update theme 1.6 docs: remove unused deps docs: update deps docs: update deps docs: update deps docs: update deps docs: update deps Delete .eggs/README.txt docs: update deps fix: warning --- .github/workflows/docs-pages.yaml | 2 +- .github/workflows/docs-pr.yaml | 2 +- docs/Makefile | 7 +------ docs/api/cassandra/cluster.rst | 2 +- docs/conf.py | 19 ++++++++++--------- docs/pyproject.toml | 23 +++++++++++------------ 6 files changed, 25 insertions(+), 30 deletions(-) diff --git a/.github/workflows/docs-pages.yaml b/.github/workflows/docs-pages.yaml index 7f45132c9c..454c013441 100644 --- a/.github/workflows/docs-pages.yaml +++ b/.github/workflows/docs-pages.yaml @@ -22,7 +22,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v3 with: - python-version: 3.7 + python-version: 3.9 - name: Set up env run: make -C docs setupenv - name: Build driver diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml index 203d41aed5..1935567dea 100644 --- a/.github/workflows/docs-pr.yaml +++ b/.github/workflows/docs-pr.yaml @@ -21,7 +21,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v3 with: - python-version: 3.7 + python-version: 3.9 - name: Set up env run: make -C docs setupenv - name: Build driver diff --git a/docs/Makefile b/docs/Makefile index 99b2a0f2a8..d1c3a4c8ec 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -1,7 +1,7 @@ # Global variables # You can set these variables from the command line. 
POETRY = poetry -SPHINXOPTS = +SPHINXOPTS = -j auto SPHINXBUILD = $(POETRY) run sphinx-build PAPER = BUILDDIR = _build @@ -13,11 +13,6 @@ PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCEDIR) TESTSPHINXOPTS = $(ALLSPHINXOPTS) -W --keep-going -# Windows variables -ifeq ($(OS),Windows_NT) - POETRY = $(APPDATA)\Python\Scripts\poetry -endif - .PHONY: all all: dirhtml diff --git a/docs/api/cassandra/cluster.rst b/docs/api/cassandra/cluster.rst index 2b3d7828a8..a9a9d378a4 100644 --- a/docs/api/cassandra/cluster.rst +++ b/docs/api/cassandra/cluster.rst @@ -215,7 +215,7 @@ .. automethod:: add_errback(fn, *args, **kwargs) - .. automethod:: add_callbacks(callback, errback, callback_args=(), callback_kwargs=None, errback_args=(), errback_args=None) + .. automethod:: add_callbacks(callback, errback, callback_args=(), callback_kwargs=None, errback_args=(), errback_kwargs=None) .. autoclass:: ResultSet () :members: diff --git a/docs/conf.py b/docs/conf.py index 431a0c14d9..98d4883094 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,14 +1,13 @@ # -*- coding: utf-8 -*- import os import sys -from datetime import date from sphinx_scylladb_theme.utils import multiversion_regex_builder sys.path.insert(0, os.path.abspath('..')) import cassandra -# -- General configuration ----------------------------------------------------- +# -- Global variables # Build documentation for the following tags and branches TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.3-scylla'] @@ -20,6 +19,8 @@ # Set which versions are deprecated DEPRECATED_VERSIONS = [''] +# -- General configuration + # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ @@ -69,7 +70,7 @@ # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' -# -- Options for not found extension ------------------------------------------- +# -- Options for not found extension # Template used to render the 404.html generated by this extension. notfound_template = '404.html' @@ -77,7 +78,7 @@ # Prefix added to all the URLs generated in the 404 page. notfound_urls_prefix = '' -# -- Options for multiversion -------------------------------------------------- +# -- Options for multiversion # Whitelist pattern for tags smv_tag_whitelist = multiversion_regex_builder(TAGS) @@ -94,16 +95,16 @@ # Format for versioned output directories inside the build directory smv_outputdir_format = '{ref.name}' -# -- Options for HTML output -------------------------------------------------- +# -- Options for sitemap extension + +sitemap_url_scheme = "/stable/{link}" + +# -- Options for HTML output # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_scylladb_theme' -# -- Options for sitemap extension --------------------------------------- - -sitemap_url_scheme = "/stable/{link}" - # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
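The api/cluster.rst fix above corrects the documented `add_callbacks` signature (the second keyword was previously misspelled as a duplicate `errback_args`). For illustration, a minimal sketch of the corrected signature in use (assuming a connected `session`; the handler functions are placeholders, not part of the driver API):

```python
# a minimal sketch, assuming a connected `session`; handlers are placeholders
def handle_rows(rows, source=None):
    print(source, rows)

def handle_error(exc, retries=0):
    print("query failed after %d retries: %r" % (retries, exc))

future = session.execute_async("SELECT release_version FROM system.local")
future.add_callbacks(callback=handle_rows, callback_kwargs={'source': 'local'},
                     errback=handle_error, errback_kwargs={'retries': 0})
```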
diff --git a/docs/pyproject.toml b/docs/pyproject.toml index 4a1656322b..d9c8bf8f04 100644 --- a/docs/pyproject.toml +++ b/docs/pyproject.toml @@ -5,24 +5,23 @@ description = "ScyllaDB Python Driver Docs" authors = ["Python Driver Contributors"] [tool.poetry.dependencies] -dnspython = "2.2.1" -eventlet = "0.25.2" +eventlet = "^0.33.3" futures = "2.2.0" -geomet = "0.1.2" -gevent = "20.12.1" +geomet = ">=0.1,<0.3" +gevent = "^23.9.1" gremlinpython = "3.4.7" -python = "^3.7" -pyyaml = "6.0" +python = "^3.9" +pyyaml = "6.0.1" pygments = "2.15.1" recommonmark = "0.7.1" redirects_cli ="~0.1.2" sphinx-autobuild = "2021.3.14" -sphinx-sitemap = "2.5.0" -sphinx-scylladb-theme = "~1.5.1" -sphinx-multiversion-scylla = "~0.2.11" -Sphinx = "4.3.2" -scales = "1.0.9" -six = "1.15.0" +sphinx-sitemap = "2.5.1" +sphinx-scylladb-theme = "~1.6.1" +sphinx-multiversion-scylla = "~0.3.1" +Sphinx = "7.2.6" +scales = "^1.0.9" +six = ">=1.9" [build-system] requires = ["poetry>=0.12"] From b60e36fd997083bf33c82c3e0174f261e3c77b5a Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 19 Oct 2023 00:25:49 +0300 Subject: [PATCH 187/551] CI: update cibuildwheel==2.16.2 so we can have python 3.12 wheels cibuildwheel==2.16.2 supports building with python 3.12.0 release --- .github/workflows/build-experimental.yml | 2 +- .github/workflows/build-push.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-experimental.yml b/.github/workflows/build-experimental.yml index 2e9540ebf3..f6d88d9388 100644 --- a/.github/workflows/build-experimental.yml +++ b/.github/workflows/build-experimental.yml @@ -32,7 +32,7 @@ jobs: - name: Install cibuildwheel run: | - python -m pip install cibuildwheel==2.12.1 + python -m pip install cibuildwheel==2.16.2 - name: Build wheels run: | diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index 1844340e73..4444f13051 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -10,7 +10,7 @@ env: CIBW_BEFORE_TEST: "pip install -r {project}/test-requirements.txt" CIBW_BEFORE_BUILD_LINUX: "rm -rf ~/.pyxbld && yum install -y libffi-devel libev libev-devel openssl openssl-devel" CIBW_ENVIRONMENT: "CASS_DRIVER_BUILD_CONCURRENCY=2 CFLAGS='-g0 -O3'" - CIBW_SKIP: cp35* cp36* *musllinux* + CIBW_SKIP: cp35* cp36* *musllinux* cp312* jobs: build_wheels: @@ -53,7 +53,7 @@ jobs: - name: Install cibuildwheel run: | - python -m pip install cibuildwheel==2.12.1 + python3 -m pip install cibuildwheel==2.16.2 - name: Install OpenSSL for Windows if: runner.os == 'Windows' From 5dafcb56a79442fb6b4b21b7a7dd759ea8c25487 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 19 Oct 2023 02:39:29 +0300 Subject: [PATCH 188/551] unittests: fix unittests to work with python 3.12 * a few imports needed to be adjusted/ignored * need to update cython to the latest version --- tests/__init__.py | 10 ++++++++-- tests/unit/io/test_asyncorereactor.py | 11 +++++++++-- tests/unit/io/test_eventletreactor.py | 7 +++---- tests/unit/test_response_future.py | 2 +- 4 files changed, 21 insertions(+), 9 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index 6ebce1d711..2d19d29276 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -25,14 +25,20 @@ def is_eventlet_monkey_patched(): if 'eventlet.patcher' not in sys.modules: return False - import eventlet.patcher + try: + import eventlet.patcher + except AttributeError: + return False return eventlet.patcher.is_monkey_patched('socket') def is_gevent_monkey_patched(): if
'gevent.monkey' not in sys.modules: return False - import gevent.socket + try: + import gevent.socket + except AttributeError: + return False return socket.socket is gevent.socket.socket diff --git a/tests/unit/io/test_asyncorereactor.py b/tests/unit/io/test_asyncorereactor.py index 6f493896d0..e9fe9aa2cb 100644 --- a/tests/unit/io/test_asyncorereactor.py +++ b/tests/unit/io/test_asyncorereactor.py @@ -15,12 +15,19 @@ from mock import patch import socket -import cassandra.io.asyncorereactor as asyncorereactor -from cassandra.io.asyncorereactor import AsyncoreConnection +try: + import cassandra.io.asyncorereactor as asyncorereactor + from cassandra.io.asyncorereactor import AsyncoreConnection + ASYNCCORE_AVAILABLE = True +except ImportError: + ASYNCCORE_AVAILABLE = False + AsyncoreConnection = None + from tests import is_monkey_patched from tests.unit.io.utils import ReactorTestMixin, TimerTestMixin, noop_if_monkey_patched +@unittest.skipIf(not ASYNCCORE_AVAILABLE, 'asyncore is deprecated') class AsyncorePatcher(unittest.TestCase): @classmethod diff --git a/tests/unit/io/test_eventletreactor.py b/tests/unit/io/test_eventletreactor.py index e2b6a533a8..8da711075d 100644 --- a/tests/unit/io/test_eventletreactor.py +++ b/tests/unit/io/test_eventletreactor.py @@ -14,16 +14,15 @@ import unittest +from mock import patch from tests.unit.io.utils import TimerTestMixin from tests import notpypy, EVENT_LOOP_MANAGER -from eventlet import monkey_patch -from mock import patch - try: + from eventlet import monkey_patch from cassandra.io.eventletreactor import EventletConnection -except ImportError: +except (ImportError, AttributeError): EventletConnection = None # noqa skip_condition = EventletConnection is None or EVENT_LOOP_MANAGER != "eventlet" diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 0d3029652a..4e212a0355 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -627,7 +627,7 @@ def test_timeout_does_not_release_stream_id(self): rf._on_timeout() pool.return_connection.assert_called_once_with(connection, stream_was_orphaned=True) - self.assertRaisesRegexp(OperationTimedOut, "Client request timeout", rf.result) + self.assertRaisesRegex(OperationTimedOut, "Client request timeout", rf.result) assert len(connection.request_ids) == 0, \ "Request IDs should be empty but it's not: {}".format(connection.request_ids) From e4b6155e0265f07ee859a6bf2caec67cdea14c9d Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 19 Oct 2023 22:51:49 +0300 Subject: [PATCH 189/551] test-requirements.txt: remove pinning from cython since we want to support python 3.12, we need to remove this pinning, because those versions are causing the cython-related unittests to fail: ``` ImportError while importing test module '/project/tests/unit/cython/test_bytesio.py'. Hint: make sure your test modules/packages have valid Python names.
Traceback: /opt/python/cp312-cp312/lib/python3.12/importlib/__init__.py:90: in import_module return _bootstrap._gcd_import(name[level:], package, level) /project/tests/unit/cython/test_bytesio.py:16: in bytesio_testhelper = cyimport('tests.unit.cython.bytesio_testhelper') /project/tests/unit/cython/utils.py:29: in cyimport import pyximport ../venv/lib/python3.12/site-packages/pyximport/__init__.py:1: in from .pyximport import * ../venv/lib/python3.12/site-packages/pyximport/pyximport.py:51: in import imp E ModuleNotFoundError: No module named 'imp' ``` --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 780fa89e18..6015aad6b0 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -10,7 +10,7 @@ twisted[tls]==19.2.1; python_version < '3.5' gevent>=1.0; platform_machine != 'i686' and platform_machine != 'win32' gevent==20.5.0; platform_machine == 'i686' or platform_machine == 'win32' eventlet>=0.33.3 -cython>=0.20,<0.30 +cython packaging futurist; python_version >= '3.7' asynctest; python_version >= '3.5' From bf09af153ef3deaad3bb2758648f684d1c93bc32 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 19 Oct 2023 23:34:22 +0300 Subject: [PATCH 190/551] CI: stop running the eventlet unittests eventlet is currently broken for python 3.12, so until we have a fixed version, we'll remove those tests from the build wheel action Ref: https://github.com/eventlet/eventlet/issues/795 --- .github/workflows/build-push.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index 4444f13051..f508fd7785 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -4,7 +4,7 @@ on: [push, pull_request] env: - CIBW_TEST_COMMAND_LINUX: "pytest --import-mode append {project}/tests/unit -k 'not (test_connection_initialization or test_cloud)' && EVENT_LOOP_MANAGER=gevent pytest --import-mode append {project}/tests/unit/io/test_geventreactor.py && EVENT_LOOP_MANAGER=eventlet pytest --import-mode append {project}/tests/unit/io/test_eventletreactor.py " + CIBW_TEST_COMMAND_LINUX: "pytest --import-mode append {project}/tests/unit -k 'not (test_connection_initialization or test_cloud)' && EVENT_LOOP_MANAGER=gevent pytest --import-mode append {project}/tests/unit/io/test_geventreactor.py" CIBW_TEST_COMMAND_MACOS: "pytest --import-mode append {project}/tests/unit -k 'not (test_multi_timer_validation or test_empty_connections or test_connection_initialization or test_timer_cancellation or test_cloud)' " CIBW_TEST_COMMAND_WINDOWS: "pytest --import-mode append {project}/tests/unit -k \"not (test_deserialize_date_range_year or test_datetype or test_libevreactor or test_connection_initialization or test_cloud)\" " CIBW_BEFORE_TEST: "pip install -r {project}/test-requirements.txt" CIBW_BEFORE_BUILD_LINUX: "rm -rf ~/.pyxbld && yum install -y libffi-devel libev libev-devel openssl openssl-devel" CIBW_ENVIRONMENT: "CASS_DRIVER_BUILD_CONCURRENCY=2 CFLAGS='-g0 -O3'" CIBW_SKIP: cp35* cp36* *musllinux* jobs: build_wheels: From b1c6e6d3ff108961f8b648cf2bfbef97827cb3d4 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 9 Nov 2023 12:46:11 +0200 Subject: [PATCH 191/551] CI: switch to python build command for sdist switch from `python setup.py sdist` to `python -m build --sdist`, which is now the formal way to build and does not assume we have `distutils` installed on the system.
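For reference, a minimal sketch of the equivalent invocation from Python, mirroring the two commands the CI step below runs (run from a source checkout; no other assumptions):

```python
# a minimal sketch of the equivalent local invocation
import subprocess, sys

subprocess.check_call([sys.executable, "-m", "pip", "install", "build"])
subprocess.check_call([sys.executable, "-m", "build", "--sdist"])  # writes dist/*.tar.gz
```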
--- .github/workflows/build-push.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index f508fd7785..0074a93fdc 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -130,8 +130,10 @@ jobs: name: Install Python - name: Build sdist - run: python setup.py sdist - + run: | + pip install build + python -m build --sdist + - uses: actions/upload-artifact@v2 with: path: dist/*.tar.gz From 8dcb657d7dd4ac4a92893cd212d8987dfbfd1707 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Wed, 11 Oct 2023 15:28:09 +0300 Subject: [PATCH 192/551] CI: run integration tests on multiple EVENT_LOOP_MANAGER and python versions since we need to deprecate asyncore, which was the default event loop manager, we need to extend the testing of some of the others so we can select a new default --- .github/workflows/integration-tests.yml | 44 +++++++++++++++++++++---- ci/run_integration_test.sh | 4 +-- 2 files changed, 39 insertions(+), 9 deletions(-) diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index c16a7a8279..e8fdc44f46 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -10,15 +10,47 @@ on: jobs: tests: - runs-on: ubuntu-20.04 + name: test ${{ matrix.event_loop_manager }} (${{ matrix.python-version }}) if: "!contains(github.event.pull_request.labels.*.name, 'disable-integration-tests')" + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - os: ubuntu-latest + python-version: "3.8" + event_loop_manager: "libev" + + - os: ubuntu-latest + python-version: "3.8" + event_loop_manager: "asyncio" + + - os: ubuntu-latest + python-version: "3.8" + event_loop_manager: "asyncore" + + - os: ubuntu-latest + python-version: "3.11" + event_loop_manager: "libev" + + - os: ubuntu-latest + python-version: "3.11" + event_loop_manager: "asyncio" + + - os: ubuntu-latest + python-version: "3.11" + event_loop_manager: "asyncore" + + - os: ubuntu-latest + python-version: "3.12" + event_loop_manager: "libev" steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.8 - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 with: - python-version: 3.8 - + python-version: ${{ matrix.python-version }} - name: Test with pytest run: | + export EVENT_LOOP_MANAGER=${{ matrix.event_loop_manager }} ./ci/run_integration_test.sh tests/integration/standard/ tests/integration/cqlengine/ diff --git a/ci/run_integration_test.sh b/ci/run_integration_test.sh index 4bcf4df1e1..b064b45399 100755 --- a/ci/run_integration_test.sh +++ b/ci/run_integration_test.sh @@ -38,9 +38,7 @@ ccm remove # run test -echo "export SCYLLA_VERSION=${SCYLLA_RELEASE}" -echo "PROTOCOL_VERSION=4 EVENT_LOOP_MANAGER=asyncio pytest --import-mode append tests/integration/standard/" export SCYLLA_VERSION=${SCYLLA_RELEASE} export MAPPED_SCYLLA_VERSION=3.11.4 -PROTOCOL_VERSION=4 EVENT_LOOP_MANAGER=libev pytest -rf --import-mode append $* +PROTOCOL_VERSION=4 pytest -rf --import-mode append $* From 06a74ee6cf86f14fca8203db382a79f3b736a483 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Wed, 11 Oct 2023 18:25:43 +0300 Subject: [PATCH 193/551] CI: update checkout and setup-python actions - actions/checkout@v3 - actions/setup-python@v4 the versions we were using relied on an older node version, and github started warning us about it ---
.github/workflows/build-experimental.yml | 4 ++-- .github/workflows/build-push.yml | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build-experimental.yml b/.github/workflows/build-experimental.yml index f6d88d9388..182f57d239 100644 --- a/.github/workflows/build-experimental.yml +++ b/.github/workflows/build-experimental.yml @@ -18,7 +18,7 @@ jobs: archs: [ aarch64, ppc64le ] steps: - - uses: actions/checkout@v2.1.0 + - uses: actions/checkout@v3 - name: Set up QEMU id: qemu @@ -27,7 +27,7 @@ jobs: platforms: all if: runner.os == 'Linux' - - uses: actions/setup-python@v2 + - uses: actions/setup-python@v4 name: Install Python - name: Install cibuildwheel diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index 0074a93fdc..2118478a9c 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -46,9 +46,9 @@ jobs: platform: PyPy steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - - uses: actions/setup-python@v2 + - uses: actions/setup-python@v4 name: Install Python - name: Install cibuildwheel @@ -113,7 +113,7 @@ jobs: - name: Build wheels run: | - python -m cibuildwheel --output-dir wheelhouse + python3 -m cibuildwheel --output-dir wheelhouse - uses: actions/upload-artifact@v2 with: @@ -124,9 +124,9 @@ jobs: if: "(!contains(github.event.pull_request.labels.*.name, 'disable-test-build'))|| github.event_name == 'push' && endsWith(github.event.ref, 'scylla')" runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - - uses: actions/setup-python@v2 + - uses: actions/setup-python@v4 name: Install Python - name: Build sdist From b9035ba729e28fa1a737e2b3aee4a3fad298c70b Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 12 Oct 2023 17:08:00 +0300 Subject: [PATCH 194/551] CI: switch to pyenv for better python2 support since we need to run older versions of scylla with cqlsh that only support python2, we need a way to still have python2 available --- .github/workflows/integration-tests.yml | 39 ++++++------------------- 1 file changed, 9 insertions(+), 30 deletions(-) diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index e8fdc44f46..35463078fe 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -12,44 +12,23 @@ jobs: tests: name: test ${{ matrix.event_loop_manager }} (${{ matrix.python-version }}) if: "!contains(github.event.pull_request.labels.*.name, 'disable-integration-tests')" - runs-on: ${{ matrix.os }} + runs-on: ubuntu-latest strategy: fail-fast: false matrix: - include: - - os: ubuntu-latest - python-version: "3.8" - event_loop_manager: "libev" - - - os: ubuntu-latest - python-version: "3.8" - event_loop_manager: "asyncio" - - - os: ubuntu-latest - python-version: "3.8" - event_loop_manager: "asyncore" - - - os: ubuntu-latest - python-version: "3.11" - event_loop_manager: "libev" - - - os: ubuntu-latest - python-version: "3.11" - event_loop_manager: "asyncio" - - - os: ubuntu-latest - python-version: "3.11" + python-version: ["3.11.4", "3.12.0b4"] + event_loop_manager: ["libev", "asyncio", "asyncore"] + exclude: + - python-version: "3.12.0b4" event_loop_manager: "asyncore" - - os: ubuntu-latest - python-version: "3.12" - event_loop_manager: "libev" steps: - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + - name: setup pyenv ${{ matrix.python-version }} + uses: 
"gabrielfalcao/pyenv-action@v16" with: - python-version: ${{ matrix.python-version }} + default: 2.7.14 + versions: ${{ matrix.python-version }} - name: Test with pytest run: | export EVENT_LOOP_MANAGER=${{ matrix.event_loop_manager }} From ac9c90db9405ee8c3e5d3cfe676fa1f142a5633d Mon Sep 17 00:00:00 2001 From: Alexey Kartashov Date: Mon, 13 Nov 2023 23:27:31 +0100 Subject: [PATCH 195/551] fix(test_shard_aware.py): Use IN to check the thread name This fixes an issue where thread name set by scylla would contain extra information, such as enterprise version containing current service level Fixes #228 --- tests/integration/standard/test_shard_aware.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/standard/test_shard_aware.py b/tests/integration/standard/test_shard_aware.py index e3d2681a5c..cf8f17e209 100644 --- a/tests/integration/standard/test_shard_aware.py +++ b/tests/integration/standard/test_shard_aware.py @@ -62,7 +62,7 @@ def verify_same_shard_in_tracing(self, results, shard_name): for event in events: LOGGER.info("%s %s %s", event.source, event.thread_name, event.description) for event in events: - self.assertEqual(event.thread_name, shard_name) + self.assertIn(shard_name, event.thread_name) self.assertIn('querying locally', "\n".join([event.description for event in events])) trace_id = results.response_future.get_query_trace_ids()[0] @@ -71,7 +71,7 @@ def verify_same_shard_in_tracing(self, results, shard_name): for event in events: LOGGER.info("%s %s", event.thread, event.activity) for event in events: - self.assertEqual(event.thread, shard_name) + self.assertIn(shard_name, event.thread) self.assertIn('querying locally', "\n".join([event.activity for event in events])) def create_ks_and_cf(self): From dd9dc328546fc32d0861f3d8b3d2c0e4fe18d337 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 12 Oct 2023 23:31:48 +0300 Subject: [PATCH 196/551] io.asyncioreactor: fix deprecated usages for working with python>=3.10 * stop using the loop argument for `asyncio.Lock` and asyncio.Quoue` * on the lock replace `with await` with `async with`, which is the correct syntax for using that lock --- cassandra/io/asyncioreactor.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/cassandra/io/asyncioreactor.py b/cassandra/io/asyncioreactor.py index ab0e90ae09..6372ab398d 100644 --- a/cassandra/io/asyncioreactor.py +++ b/cassandra/io/asyncioreactor.py @@ -1,5 +1,5 @@ from cassandra.connection import Connection, ConnectionShutdown - +import sys import asyncio import logging import os @@ -89,9 +89,11 @@ def __init__(self, *args, **kwargs): self._connect_socket() self._socket.setblocking(0) - - self._write_queue = asyncio.Queue(loop=self._loop) - self._write_queue_lock = asyncio.Lock(loop=self._loop) + loop_args = dict() + if sys.version_info[0] == 3 and sys.version_info[1] < 10: + loop_args['loop'] = self._loop + self._write_queue = asyncio.Queue(**loop_args) + self._write_queue_lock = asyncio.Lock(**loop_args) # see initialize_reactor -- loop is running in a separate thread, so we # have to use a threadsafe call @@ -174,7 +176,7 @@ def push(self, data): async def _push_msg(self, chunks): # This lock ensures all chunks of a message are sequential in the Queue - with await self._write_queue_lock: + async with self._write_queue_lock: for chunk in chunks: self._write_queue.put_nowait(chunk) From 725e62a3009568a7d5a6b1eddebfeb3fa818c7e6 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 12 Oct 2023 23:36:07 +0300 Subject: 
[PATCH 197/551] tests: ignore asyncio related warning in test_deprecation_warnings since python3.8 we have this warning: ``` DeprecationWarning('The loop argument is deprecated since Python 3.8, and scheduled for removal in Python 3.10.') ``` and it's o.k. to have it since on Python 3.10 and up, we stop using that argument --- tests/integration/cqlengine/model/test_model.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/integration/cqlengine/model/test_model.py b/tests/integration/cqlengine/model/test_model.py index 859facf0e1..73096e1b5d 100644 --- a/tests/integration/cqlengine/model/test_model.py +++ b/tests/integration/cqlengine/model/test_model.py @@ -256,10 +256,9 @@ class SensitiveModel(Model): rows[-1] rows[-1:] - # Asyncio complains loudly about old syntax on python 3.7+, so get rid of all of those - relevant_warnings = [warn for warn in w if "with (yield from lock)" not in str(warn.message)] + # ignore DeprecationWarning('The loop argument is deprecated since Python 3.8, and scheduled for removal in Python 3.10.') + relevant_warnings = [warn for warn in w if "The loop argument is deprecated" not in str(warn.message)] - self.assertEqual(len(relevant_warnings), 4) self.assertIn("__table_name_case_sensitive__ will be removed in 4.0.", str(relevant_warnings[0].message)) self.assertIn("__table_name_case_sensitive__ will be removed in 4.0.", str(relevant_warnings[1].message)) self.assertIn("ModelQuerySet indexing with negative indices support will be removed in 4.0.", From 64f3fe99b2e3a36a1e0f538f43d446d43cc41dea Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Wed, 18 Oct 2023 10:10:33 +0300 Subject: [PATCH 198/551] tests: skip `test_execute_query_timeout` if running with asyncio asyncio can't do timeouts smaller than 1ms, which this test requires; it's a limitation of `asyncio.sleep` Fixes: https://github.com/scylladb/python-driver/issues/263 --- tests/__init__.py | 1 + tests/integration/standard/test_cluster.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/__init__.py b/tests/__init__.py index 2d19d29276..1d0d9fe34c 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -105,3 +105,4 @@ def is_windows(): notwindows = unittest.skipUnless(not is_windows(), "This test is not adequate for windows") notpypy = unittest.skipUnless(not platform.python_implementation() == 'PyPy', "This tests is not suitable for pypy") +notasyncio = unittest.skipUnless(not EVENT_LOOP_MANAGER == 'asyncio', "This tests is not suitable for EVENT_LOOP_MANAGER=asyncio") diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 43a1d080ee..36a54aedae 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -39,7 +39,7 @@ from cassandra import connection from cassandra.connection import DefaultEndPoint -from tests import notwindows +from tests import notwindows, notasyncio from tests.integration import use_cluster, get_server_versions, CASSANDRA_VERSION, \ execute_until_pass, execute_with_long_wait_retry, get_node, MockLoggingHandler, get_unsupported_lower_protocol, \ get_unsupported_upper_protocol, lessthanprotocolv3, protocolv6, local, CASSANDRA_IP, greaterthanorequalcass30, \ @@ -1139,6 +1139,7 @@ def test_stale_connections_after_shutdown(self): assert False, f'Found stale connections: {result.stdout}' @notwindows + @notasyncio  # asyncio can't do timeouts smaller than 1ms, as this test requires def test_execute_query_timeout(self): with TestCluster() as
cluster: session = cluster.connect(wait_for_all_pools=True) From d407423c2b2a34be4f26ab5d399f800cf2b82cfd Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Fri, 10 Nov 2023 01:33:08 +0200 Subject: [PATCH 199/551] asyncio: stop using the loop variable when not needed there are some places where we don't need to pass or create the asyncio loop, and we should avoid it --- cassandra/io/asyncioreactor.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/cassandra/io/asyncioreactor.py b/cassandra/io/asyncioreactor.py index 6372ab398d..fc02392511 100644 --- a/cassandra/io/asyncioreactor.py +++ b/cassandra/io/asyncioreactor.py @@ -1,3 +1,5 @@ +import threading + from cassandra.connection import Connection, ConnectionShutdown import sys import asyncio @@ -41,13 +43,12 @@ def end(self): def __init__(self, timeout, callback, loop): delayed = self._call_delayed_coro(timeout=timeout, - callback=callback, - loop=loop) + callback=callback) self._handle = asyncio.run_coroutine_threadsafe(delayed, loop=loop) @staticmethod - async def _call_delayed_coro(timeout, callback, loop): - await asyncio.sleep(timeout, loop=loop) + async def _call_delayed_coro(timeout, callback): + await asyncio.sleep(timeout) return callback() def __lt__(self, other): @@ -111,8 +112,11 @@ def initialize_reactor(cls): if cls._pid != os.getpid(): cls._loop = None if cls._loop is None: - cls._loop = asyncio.new_event_loop() - asyncio.set_event_loop(cls._loop) + try: + cls._loop = asyncio.get_running_loop() + except RuntimeError: + cls._loop = asyncio.new_event_loop() + asyncio.set_event_loop(cls._loop) if not cls._loop_thread: # daemonize so the loop will be shut down on interpreter @@ -165,7 +169,7 @@ def push(self, data): else: chunks = [data] - if self._loop_thread.ident != get_ident(): + if self._loop_thread != threading.current_thread(): asyncio.run_coroutine_threadsafe( self._push_msg(chunks), loop=self._loop From bc5cf17cbb37a27c0ee40562df84d7d72eac32f1 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Fri, 10 Nov 2023 01:34:02 +0200 Subject: [PATCH 200/551] CI: add integration tests for python3.8 --- .github/workflows/integration-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 35463078fe..a8ee628a8d 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -16,7 +16,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.11.4", "3.12.0b4"] + python-version: ["3.8.17", "3.11.4", "3.12.0b4"] event_loop_manager: ["libev", "asyncio", "asyncore"] exclude: - python-version: "3.12.0b4" From dab392db6a80bed5d1c88648b82eda007ef5f714 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Wed, 11 Oct 2023 19:23:01 +0300 Subject: [PATCH 201/551] Ignore AttributeError on eventlet import running on python 3.12 we get this error; we should ignore it until eventlet is fixed ``` ImportError while loading conftest '/home/runner/work/python-driver/python-driver/tests/integration/conftest.py'.
tests/integration/__init__.py:16: in from cassandra.cluster import Cluster cassandra/cluster.py:103: in init cassandra.cluster from cassandra.io.eventletreactor import EventletConnection cassandra/io/eventletreactor.py:18: in import eventlet .test-venv/lib/python3.12/site-packages/eventlet/__init__.py:17: in from eventlet import convenience .test-venv/lib/python3.12/site-packages/eventlet/convenience.py:7: in from eventlet.green import socket .test-venv/lib/python3.12/site-packages/eventlet/green/socket.py:21: in from eventlet.support import greendns .test-venv/lib/python3.12/site-packages/eventlet/support/greendns.py:45: in from eventlet.green import ssl .test-venv/lib/python3.12/site-packages/eventlet/green/ssl.py:25: in _original_wrap_socket = __ssl.wrap_socket E AttributeError: module 'ssl' has no attribute 'wrap_socket' ``` Ref: https://github.com/eventlet/eventlet/issues/812 --- cassandra/cluster.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 6ec04521c7..9530333ba6 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -101,7 +101,9 @@ try: from cassandra.io.eventletreactor import EventletConnection -except ImportError: +except (ImportError, AttributeError): + # AttributeError was added for handling python 3.12 https://github.com/eventlet/eventlet/issues/812 + # TODO: remove it when the eventlet issue is fixed EventletConnection = None try: @@ -115,9 +117,13 @@ def _is_eventlet_monkey_patched(): if 'eventlet.patcher' not in sys.modules: return False - import eventlet.patcher - return eventlet.patcher.is_monkey_patched('socket') - + try: + import eventlet.patcher + return eventlet.patcher.is_monkey_patched('socket') + except (ImportError, AttributeError): + # AttributeError was added for handling python 3.12 https://github.com/eventlet/eventlet/issues/812 + # TODO: remove it when the eventlet issue is fixed + return False def _is_gevent_monkey_patched(): if 'gevent.monkey' not in sys.modules: From 43fbd915f7049b7070d08b7313b0eddf8a5755cf Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 12 Oct 2023 12:27:21 +0300 Subject: [PATCH 202/551] test_cluster: remove `import asyncore` this isn't being used anyhow, and it breaks support for python 3.12 --- tests/integration/standard/test_cluster.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py index 36a54aedae..43356dbd82 100644 --- a/tests/integration/standard/test_cluster.py +++ b/tests/integration/standard/test_cluster.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
-import asyncore import subprocess unittest From e393ffab297effdb98b87f5100f8c193fff96fa0 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 12 Oct 2023 16:03:43 +0300 Subject: [PATCH 203/551] handle the case asyncore isn't available since asyncore isn't available in python 3.12, we should handle it gracefully, and enable any other event loop implementations to work --- tests/integration/standard/test_connection.py | 8 ++++++-- .../integration/standard/test_scylla_cloud.py | 19 +++++++++++------ 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 9eb658316e..0220ffbb1a 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -26,8 +26,12 @@ from cassandra import ConsistencyLevel, OperationTimedOut from cassandra.cluster import NoHostAvailable, ConnectionShutdown, ExecutionProfile, EXEC_PROFILE_DEFAULT -import cassandra.io.asyncorereactor -from cassandra.io.asyncorereactor import AsyncoreConnection + +try: + from cassandra.io.asyncorereactor import AsyncoreConnection +except ImportError: + AsyncoreConnection = None + from cassandra.protocol import QueryMessage from cassandra.connection import Connection from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, HostStateListener diff --git a/tests/integration/standard/test_scylla_cloud.py b/tests/integration/standard/test_scylla_cloud.py index 751bf656c3..4515358085 100644 --- a/tests/integration/standard/test_scylla_cloud.py +++ b/tests/integration/standard/test_scylla_cloud.py @@ -6,15 +6,22 @@ from tests.integration import use_cluster from cassandra.cluster import Cluster, TwistedConnection -from cassandra.io.asyncorereactor import AsyncoreConnection + + from cassandra.io.libevreactor import LibevConnection +supported_connection_classes = [LibevConnection, TwistedConnection] +try: + from cassandra.io.asyncorereactor import AsyncoreConnection + supported_connection_classes += [AsyncoreConnection] +except ImportError: + pass + +#from cassandra.io.geventreactor import GeventConnection +#from cassandra.io.eventletreactor import EventletConnection +#from cassandra.io.asyncioreactor import AsyncioConnection -supported_connection_classes = [AsyncoreConnection, LibevConnection, TwistedConnection] # need to run them with specific configuration like `gevent.monkey.patch_all()` or under async functions -unsupported_connection_classes = [GeventConnection, AsyncioConnection, EventletConnection] + # unsupported_connection_classes = [GeventConnection, AsyncioConnection, EventletConnection] class ScyllaCloudConfigTests(TestCase): From c02c8f7bb202158b752e548145edc2bc37c99bd8 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Wed, 18 Oct 2023 00:14:45 +0300 Subject: [PATCH 204/551] cassandra/cluster.py: make asyncio default if asyncore not available since python 3.12 is deprecating asyncore, we should make asyncio the default fallback event loop when asyncore isn't available, now that asyncio is fixed and we verified it's working (passing the integration suite) on the multiple python versions we support (from 3.8 to 3.12) --- cassandra/cluster.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 9530333ba6..1de3a6f508 100644
--- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -143,7 +143,10 @@ def _is_gevent_monkey_patched(): try: from cassandra.io.libevreactor import LibevConnection as DefaultConnection # NOQA except ImportError: - from cassandra.io.asyncorereactor import AsyncoreConnection as DefaultConnection # NOQA + try: + from cassandra.io.asyncorereactor import AsyncoreConnection as DefaultConnection # NOQA + except ImportError: + from cassandra.io.asyncioreactor import AsyncioConnection as DefaultConnection # NOQA # Forces load of utf8 encoding module to avoid deadlock that occurs # if code that is being imported tries to import the module in a seperate From 1cc6ccc90821af44dcaa79b789625f1476f16706 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Sun, 19 Nov 2023 18:35:31 +0200 Subject: [PATCH 205/551] CI: enable builds of python 3.12 wheels --- .github/workflows/build-experimental.yml | 2 +- .github/workflows/build-push.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-experimental.yml b/.github/workflows/build-experimental.yml index 182f57d239..bfc6bd0949 100644 --- a/.github/workflows/build-experimental.yml +++ b/.github/workflows/build-experimental.yml @@ -4,7 +4,7 @@ on: [push, pull_request] env: CIBW_BEFORE_BUILD_LINUX: "rm -rf ~/.pyxbld && yum install -y libev libev-devel openssl openssl-devel" CIBW_ENVIRONMENT: "CASS_DRIVER_BUILD_CONCURRENCY=2 CFLAGS='-g0 -O3'" - CIBW_BUILD: "cp38* cp39* cp310* cp311*" + CIBW_BUILD: "cp39* cp310* cp311* cp312*" CIBW_SKIP: "*musllinux*" jobs: build_wheels: diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index 2118478a9c..74f0415822 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -10,7 +10,7 @@ env: CIBW_BEFORE_TEST: "pip install -r {project}/test-requirements.txt" CIBW_BEFORE_BUILD_LINUX: "rm -rf ~/.pyxbld && yum install -y libffi-devel libev libev-devel openssl openssl-devel" CIBW_ENVIRONMENT: "CASS_DRIVER_BUILD_CONCURRENCY=2 CFLAGS='-g0 -O3'" - CIBW_SKIP: cp35* cp36* *musllinux* cp312* + CIBW_SKIP: cp35* cp36* *musllinux* jobs: build_wheels: From 679ad2490b7bfb440cbda122712380acff925dde Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Mon, 27 Nov 2023 15:47:38 +0200 Subject: [PATCH 206/551] asyncioreactor: make sure task isn't deleted midway In the push function, self._loop.create_task is called and its return value is ignored. While the tests may pass now, this code is not correct: the Python docs call out this exact pattern as a source of bugs.
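A minimal sketch of the recommended pattern (standalone, with hypothetical names; the driver's actual change is in the diff below): the event loop keeps only a weak reference to scheduled tasks, so the caller must hold a strong reference until each task completes.

```python
import asyncio

background_tasks = set()

async def push_chunks(chunks):
    ...  # write the chunks to the transport

def push(loop, chunks):
    task = loop.create_task(push_chunks(chunks))
    # Keep a strong reference so the task cannot be garbage-collected
    # mid-flight; drop it once the task finishes.
    background_tasks.add(task)
    task.add_done_callback(background_tasks.discard)
```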
Ref: https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task --- cassandra/io/asyncioreactor.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cassandra/io/asyncioreactor.py b/cassandra/io/asyncioreactor.py index fc02392511..4876b5be1e 100644 --- a/cassandra/io/asyncioreactor.py +++ b/cassandra/io/asyncioreactor.py @@ -106,6 +106,8 @@ def __init__(self, *args, **kwargs): ) self._send_options_message() + self._background_tasks = set() + @classmethod def initialize_reactor(cls): with cls._lock: @@ -176,7 +178,10 @@ def push(self, data): ) else: # avoid races/hangs by just scheduling this, not using threadsafe - self._loop.create_task(self._push_msg(chunks)) + task = self._loop.create_task(self._push_msg(chunks)) + + self._background_tasks.add(task) + task.add_done_callback(self._background_tasks.discard) async def _push_msg(self, chunks): # This lock ensures all chunks of a message are sequential in the Queue From facabc594246b400f9debb004f5fd8c35aacc006 Mon Sep 17 00:00:00 2001 From: Alexey Kartashov Date: Fri, 1 Dec 2023 15:52:32 +0100 Subject: [PATCH 207/551] cqlengine: Remove deepcopy on UserType deserialization This change makes it so a newly instanced UserType isn't immediately copied by deepcopy during deserialization; the copy could cause a huge slowdown if that UserType contains a lot of data or nested UserTypes, in which case the deepcopy calls would cascade, as each to_python call would eventually clone parts of the source object. As there isn't a lot of information on why this deepcopy is here in the first place, this change could potentially break something. Running integration tests against this commit does not produce regressions, so this call looks safe to remove, but I'm leaving this warning here for future reference. Fixes #152 --- cassandra/cqlengine/columns.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index 49116129fc..e0012858b4 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -1038,12 +1038,11 @@ def to_python(self, value): if value is None: return - copied_value = deepcopy(value) for name, field in self.user_type._fields.items(): - if copied_value[name] is not None or isinstance(field, BaseContainerColumn): - copied_value[name] = field.to_python(copied_value[name]) + if value[name] is not None or isinstance(field, BaseContainerColumn): + value[name] = field.to_python(value[name]) - return copied_value + return value def to_database(self, value): if value is None: From 6788a7c23f158260e175afd2d20e297e1dadcfd2 Mon Sep 17 00:00:00 2001 From: Piotr Grabowski Date: Fri, 8 Dec 2023 16:44:11 +0100 Subject: [PATCH 208/551] connection: fix logging of non-IP sockets Before this fix, the debug log would crash _connect_socket for UNIX domain sockets. getsockname() for UNIX domain sockets returns a single string instead of a tuple (as is the case for IPv4/IPv6).
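For illustration only (a quick sketch outside the patch; AF_UNIX exists on POSIX systems): the shape of getsockname()'s return value depends on the address family, and an unbound UNIX domain client socket reports an empty string, so indexing local_addr[0] raises exactly the IndexError shown below.

```python
import socket

# IPv4/IPv6 sockets report a (host, port[, ...]) tuple:
s4 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s4.bind(("127.0.0.1", 0))
print(s4.getsockname())         # e.g. ('127.0.0.1', 49152)

# UNIX domain sockets report a plain string instead:
su = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
print(repr(su.getsockname()))   # '' until bound, a filesystem path afterwards
```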
Therefore the code could crash as it tried to get the second element of a non-tuple (empty string): Traceback (most recent call last): File "/home/margdoc/Workspace/scylla/maintenance_mode_testing.py", line 5, in s = c.connect() ^^^^^^^^^^^ File "cassandra/cluster.py", line 1750, in cassandra.cluster.Cluster.connect File "cassandra/cluster.py", line 1776, in cassandra.cluster.Cluster.connect File "cassandra/cluster.py", line 1763, in cassandra.cluster.Cluster.connect File "cassandra/cluster.py", line 3581, in cassandra.cluster.ControlConnection.connect File "cassandra/cluster.py", line 3642, in cassandra.cluster.ControlConnection._reconnect_internal cassandra.cluster.NoHostAvailable: ('Unable to connect to any servers', {'test_socket': IndexError('string index out of range')}) Fix the issue by not unpacking those values and just printing them as-is, relying on the %s formatter to print all elements of a tuple (host, port) for IP sockets and the plain string for UNIX domain sockets. The printed log is not formatted as nicely as before, however this is a DEBUG print so few users will ever see it. The new approach should work with any format of getsockname(). Fixes #278 --- cassandra/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 295066694b..6007b26a27 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -940,7 +940,7 @@ def _connect_socket(self): self._initiate_connection(sockaddr) self._socket.settimeout(None) local_addr = self._socket.getsockname() - log.debug('Connection %s %s:%s -> %s:%s', id(self), local_addr[0], local_addr[1], sockaddr[0], sockaddr[1]) + log.debug("Connection %s: '%s' -> '%s'", id(self), local_addr, sockaddr) if self._check_hostname: self._match_hostname() sockerr = None From e7532b10d0c6c9839bf5bcbcc6a834da0e243f53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Fri, 22 Dec 2023 00:34:10 +0100 Subject: [PATCH 209/551] Release 3.26.4 --- cassandra/__init__.py | 2 +- docs/conf.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 318627cfe1..53a0cad5e7 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 26, 3) +__version_info__ = (3, 26, 4) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/conf.py b/docs/conf.py index 98d4883094..b8fc66275e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -10,10 +10,10 @@ # -- Global variables # Build documentation for the following tags and branches -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.3-scylla'] +TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.4-scylla'] BRANCHES = ['master'] # Set the latest version. -LATEST_VERSION = '3.26.3-scylla' +LATEST_VERSION = '3.26.4-scylla' # Set which versions are not released yet. UNSTABLE_VERSIONS = ['master'] # Set which versions are deprecated From e8d7151d615eeaaabd76ed178f373bbdd0489aaf Mon Sep 17 00:00:00 2001 From: Sylwia Szunejko Date: Wed, 3 Jan 2024 09:01:36 +0100 Subject: [PATCH 210/551] Add parsing TABLETS_ROUTING_V1 extension to ProtocolFeatures In order for Scylla to send the tablet info, the driver must tell the database during connection handshake that it is able to interpret it. This negotiation is added as part of the ProtocolFeatures class.
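The negotiation itself is a simple option echo. A sketch distilled from the diff below (free functions here instead of the class's methods, for brevity): the server advertises the extension as a key in its SUPPORTED options, and the driver opts in by sending the same key back in STARTUP.

```python
TABLETS_ROUTING_V1 = "TABLETS_ROUTING_V1"

def parse_tablets_info(supported):
    # True when the server lists the extension among its SUPPORTED options.
    return TABLETS_ROUTING_V1 in supported

def add_startup_options(options, tablets_routing_v1):
    # Echoing the key back in STARTUP tells Scylla that this driver can
    # interpret the tablet routing payloads attached to RESULT messages.
    if tablets_routing_v1:
        options[TABLETS_ROUTING_V1] = ""
```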
--- cassandra/protocol_features.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/cassandra/protocol_features.py b/cassandra/protocol_features.py index fc7c5b060e..4eb7019f84 100644 --- a/cassandra/protocol_features.py +++ b/cassandra/protocol_features.py @@ -6,22 +6,26 @@ RATE_LIMIT_ERROR_EXTENSION = "SCYLLA_RATE_LIMIT_ERROR" +TABLETS_ROUTING_V1 = "TABLETS_ROUTING_V1" class ProtocolFeatures(object): rate_limit_error = None shard_id = 0 sharding_info = None + tablets_routing_v1 = False - def __init__(self, rate_limit_error=None, shard_id=0, sharding_info=None): + def __init__(self, rate_limit_error=None, shard_id=0, sharding_info=None, tablets_routing_v1=False): self.rate_limit_error = rate_limit_error self.shard_id = shard_id self.sharding_info = sharding_info + self.tablets_routing_v1 = tablets_routing_v1 @staticmethod def parse_from_supported(supported): rate_limit_error = ProtocolFeatures.maybe_parse_rate_limit_error(supported) shard_id, sharding_info = ProtocolFeatures.parse_sharding_info(supported) - return ProtocolFeatures(rate_limit_error, shard_id, sharding_info) + tablets_routing_v1 = ProtocolFeatures.parse_tablets_info(supported) + return ProtocolFeatures(rate_limit_error, shard_id, sharding_info, tablets_routing_v1) @staticmethod def maybe_parse_rate_limit_error(supported): @@ -43,6 +47,8 @@ def get_cql_extension_field(vals, key): def add_startup_options(self, options): if self.rate_limit_error is not None: options[RATE_LIMIT_ERROR_EXTENSION] = "" + if self.tablets_routing_v1: + options[TABLETS_ROUTING_V1] = "" @staticmethod def parse_sharding_info(options): @@ -63,3 +69,6 @@ def parse_sharding_info(options): shard_aware_port, shard_aware_port_ssl) + @staticmethod + def parse_tablets_info(options): + return TABLETS_ROUTING_V1 in options From 8b2359f06305f9e7cf7d4978d0b0ab9f0bd58de6 Mon Sep 17 00:00:00 2001 From: Curt Buechter Date: Wed, 10 Jan 2024 12:04:17 -0600 Subject: [PATCH 211/551] Fix typo --- docs/getting-started.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/getting-started.rst b/docs/getting-started.rst index 59a2acbd04..1969b503ba 100644 --- a/docs/getting-started.rst +++ b/docs/getting-started.rst @@ -188,7 +188,7 @@ of the driver may use the same placeholders for both). Passing Parameters to CQL Queries --------------------------------- -Althought it is not recommended, you can also pass parameters to non-prepared +Although it is not recommended, you can also pass parameters to non-prepared statements. The driver supports two forms of parameter place-holders: positional and named. From 669e516839a20fd7969d4117a0f0d330fe796163 Mon Sep 17 00:00:00 2001 From: Sylwia Szunejko Date: Fri, 12 Jan 2024 10:47:38 +0100 Subject: [PATCH 212/551] Add support for unix domain sockets to WhiteListRoundRobinPolicy --- cassandra/policies.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index fa1e8cf385..b4159455bf 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -19,6 +19,7 @@ import socket import warnings from cassandra import WriteType as WT +from cassandra.connection import UnixSocketEndPoint # This is done this way because WriteType was originally @@ -422,8 +423,13 @@ def __init__(self, hosts): connections to. 
""" self._allowed_hosts = tuple(hosts) - self._allowed_hosts_resolved = [endpoint[4][0] for a in self._allowed_hosts - for endpoint in socket.getaddrinfo(a, None, socket.AF_UNSPEC, socket.SOCK_STREAM)] + self._allowed_hosts_resolved = [] + for h in self._allowed_hosts: + if isinstance(h, UnixSocketEndPoint): + self._allowed_hosts_resolved.append(h._unix_socket_path) + else: + self._allowed_hosts_resolved.extend([endpoint[4][0] + for endpoint in socket.getaddrinfo(h, None, socket.AF_UNSPEC, socket.SOCK_STREAM)]) RoundRobinPolicy.__init__(self) From 02e7ce969c859c305f09e13e852aba0f2f6c47e4 Mon Sep 17 00:00:00 2001 From: sylwiaszunejko Date: Fri, 28 Jul 2023 09:26:54 +0200 Subject: [PATCH 213/551] Use tablets in token and shard awareness Add mechanism to parse system.tablets periodically. In TokenAwarePolicy check if keyspace uses tablets if so try to use them to find replicas. Make shard awareness work when using tablets. Everything is wrapped in experimental setting, because tablets are still experimental in ScyllaDB and changes in the tablets format are possible. --- cassandra/cluster.py | 34 ++++++++- cassandra/metadata.py | 2 + cassandra/policies.py | 16 ++++- cassandra/pool.py | 28 ++++++-- cassandra/query.py | 12 +++- cassandra/tablets.py | 107 +++++++++++++++++++++++++++++ tests/unit/test_policies.py | 5 ++ tests/unit/test_response_future.py | 10 +-- 8 files changed, 199 insertions(+), 15 deletions(-) create mode 100644 cassandra/tablets.py diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 6ec04521c7..e3ddc74709 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -41,7 +41,7 @@ import weakref from weakref import WeakValueDictionary -from cassandra import (ConsistencyLevel, AuthenticationFailed, +from cassandra import (ConsistencyLevel, AuthenticationFailed, InvalidRequest, OperationTimedOut, UnsupportedOperation, SchemaTargetType, DriverException, ProtocolVersion, UnresolvableContactPoints) @@ -51,6 +51,7 @@ EndPoint, DefaultEndPoint, DefaultEndPointFactory, ContinuousPagingState, SniEndPointFactory, ConnectionBusy) from cassandra.cqltypes import UserType +import cassandra.cqltypes as types from cassandra.encoder import Encoder from cassandra.protocol import (QueryMessage, ResultMessage, ErrorMessage, ReadTimeoutErrorMessage, @@ -79,6 +80,7 @@ named_tuple_factory, dict_factory, tuple_factory, FETCH_SIZE_UNSET, HostTargetingStatement) from cassandra.marshal import int64_pack +from cassandra.tablets import Tablet, Tablets from cassandra.timestamps import MonotonicTimestampGenerator from cassandra.compat import Mapping from cassandra.util import _resolve_contact_points_to_string_map, Version @@ -1775,6 +1777,14 @@ def connect(self, keyspace=None, wait_for_all_pools=False): self.shutdown() raise + # Update the information about tablet support after connection handshake. 
+ self.load_balancing_policy._tablets_routing_v1 = self.control_connection._tablets_routing_v1 + child_policy = self.load_balancing_policy.child_policy if hasattr(self.load_balancing_policy, 'child_policy') else None + while child_policy is not None: + if hasattr(child_policy, '_tablet_routing_v1'): + child_policy._tablet_routing_v1 = self.control_connection._tablets_routing_v1 + child_policy = child_policy.child_policy if hasattr(child_policy, 'child_policy') else None + self.profile_manager.check_supported() # todo: rename this method if self.idle_heartbeat_interval: @@ -2389,7 +2399,6 @@ def add_prepared(self, query_id, prepared_statement): with self._prepared_statement_lock: self._prepared_statements[query_id] = prepared_statement - class Session(object): """ A collection of connection pools for each host in the cluster. @@ -3541,6 +3550,7 @@ class PeersQueryType(object): _schema_meta_page_size = 1000 _uses_peers_v2 = True + _tablets_routing_v1 = False # for testing purposes _time = time @@ -3674,6 +3684,8 @@ def _try_connect(self, host): # If sharding information is available, it's a ScyllaDB cluster, so do not use peers_v2 table. if connection.features.sharding_info is not None: self._uses_peers_v2 = False + + self._tablets_routing_v1 = connection.features.tablets_routing_v1 # use weak references in both directions # _clear_watcher will be called when this ControlConnection is about to be finalized @@ -4600,7 +4612,10 @@ def _query(self, host, message=None, cb=None): connection = None try: # TODO get connectTimeout from cluster settings - connection, request_id = pool.borrow_connection(timeout=2.0, routing_key=self.query.routing_key if self.query else None) + if self.query: + connection, request_id = pool.borrow_connection(timeout=2.0, routing_key=self.query.routing_key, keyspace=self.query.keyspace, table=self.query.table) + else: + connection, request_id = pool.borrow_connection(timeout=2.0) self._connection = connection result_meta = self.prepared_statement.result_metadata if self.prepared_statement else [] @@ -4719,6 +4734,19 @@ def _set_result(self, host, connection, pool, response): self._warnings = getattr(response, 'warnings', None) self._custom_payload = getattr(response, 'custom_payload', None) + if self._custom_payload and self.session.cluster.control_connection._tablets_routing_v1 and 'tablets-routing-v1' in self._custom_payload: + protocol = self.session.cluster.protocol_version + info = self._custom_payload.get('tablets-routing-v1') + ctype = types.lookup_casstype('TupleType(LongType, LongType, ListType(TupleType(UUIDType, Int32Type)))') + tablet_routing_info = ctype.from_binary(info, protocol) + first_token = tablet_routing_info[0] + last_token = tablet_routing_info[1] + tablet_replicas = tablet_routing_info[2] + tablet = Tablet.from_row(first_token, last_token, tablet_replicas) + keyspace = self.query.keyspace + table = self.query.table + self.session.cluster.metadata._tablets.add_tablet(keyspace, table, tablet) + if isinstance(response, ResultMessage): if response.kind == RESULT_KIND_SET_KEYSPACE: session = getattr(self, 'session', None) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 5f1cfa5beb..c2993eaa3f 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -44,6 +44,7 @@ from cassandra.pool import HostDistance from cassandra.connection import EndPoint from cassandra.compat import Mapping +from cassandra.tablets import Tablets log = logging.getLogger(__name__) @@ -126,6 +127,7 @@ def __init__(self): self._hosts = {} 
self._host_id_by_endpoint = {} self._hosts_lock = RLock() + self._tablets = Tablets({}) def export_schema_as_string(self): """ diff --git a/cassandra/policies.py b/cassandra/policies.py index fa1e8cf385..cfacb16d81 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -335,6 +335,7 @@ class TokenAwarePolicy(LoadBalancingPolicy): _child_policy = None _cluster_metadata = None + _tablets_routing_v1 = False shuffle_replicas = False """ Yield local replicas in a random order. @@ -346,6 +347,7 @@ def __init__(self, child_policy, shuffle_replicas=False): def populate(self, cluster, hosts): self._cluster_metadata = cluster.metadata + self._tablets_routing_v1 = cluster.control_connection._tablets_routing_v1 self._child_policy.populate(cluster, hosts) def check_supported(self): @@ -376,7 +378,19 @@ def make_query_plan(self, working_keyspace=None, query=None): for host in child.make_query_plan(keyspace, query): yield host else: - replicas = self._cluster_metadata.get_replicas(keyspace, routing_key) + replicas = [] + if self._tablets_routing_v1: + tablet = self._cluster_metadata._tablets.get_tablet_for_key(keyspace, query.table, self._cluster_metadata.token_map.token_class.from_key(routing_key)) + + if tablet is not None: + replicas_mapped = set(map(lambda r: r[0], tablet.replicas)) + child_plan = child.make_query_plan(keyspace, query) + + replicas = [host for host in child_plan if host.host_id in replicas_mapped] + + if replicas == []: + replicas = self._cluster_metadata.get_replicas(keyspace, routing_key) + if self.shuffle_replicas: shuffle(replicas) for replica in replicas: diff --git a/cassandra/pool.py b/cassandra/pool.py index 110b682c72..bb176b2ee7 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -392,6 +392,8 @@ class HostConnection(object): # the number below, all excess connections will be closed. 
max_excess_connections_per_shard_multiplier = 3 + tablets_routing_v1 = False + def __init__(self, host, host_distance, session): self.host = host self.host_distance = host_distance @@ -436,10 +438,11 @@ def __init__(self, host, host_distance, session): if first_connection.features.sharding_info and not self._session.cluster.shard_aware_options.disable: self.host.sharding_info = first_connection.features.sharding_info self._open_connections_for_all_shards(first_connection.features.shard_id) + self.tablets_routing_v1 = first_connection.features.tablets_routing_v1 log.debug("Finished initializing connection for host %s", self.host) - def _get_connection_for_routing_key(self, routing_key=None): + def _get_connection_for_routing_key(self, routing_key=None, keyspace=None, table=None): if self.is_shutdown: raise ConnectionException( "Pool for %s is shutdown" % (self.host,), self.host) @@ -450,7 +453,22 @@ def _get_connection_for_routing_key(self, routing_key=None): shard_id = None if not self._session.cluster.shard_aware_options.disable and self.host.sharding_info and routing_key: t = self._session.cluster.metadata.token_map.token_class.from_key(routing_key) - shard_id = self.host.sharding_info.shard_id_from_token(t.value) + + shard_id = None + if self.tablets_routing_v1 and table is not None: + if keyspace is None: + keyspace = self._keyspace + + tablet = self._session.cluster.metadata._tablets.get_tablet_for_key(keyspace, table, t) + + if tablet is not None: + for replica in tablet.replicas: + if replica[0] == self.host.host_id: + shard_id = replica[1] + break + + if shard_id is None: + shard_id = self.host.sharding_info.shard_id_from_token(t.value) conn = self._connections.get(shard_id) @@ -496,15 +514,15 @@ def _get_connection_for_routing_key(self, routing_key=None): return random.choice(active_connections) return random.choice(list(self._connections.values())) - def borrow_connection(self, timeout, routing_key=None): - conn = self._get_connection_for_routing_key(routing_key) + def borrow_connection(self, timeout, routing_key=None, keyspace=None, table=None): + conn = self._get_connection_for_routing_key(routing_key, keyspace, table) start = time.time() remaining = timeout last_retry = False while True: if conn.is_closed: # The connection might have been closed in the meantime - if so, try again - conn = self._get_connection_for_routing_key(routing_key) + conn = self._get_connection_for_routing_key(routing_key, keyspace, table) with conn.lock: if (not conn.is_closed or last_retry) and conn.in_flight < conn.max_request_id: # On last retry we ignore connection status, since it is better to return closed connection than diff --git a/cassandra/query.py b/cassandra/query.py index f7a5b8fdf5..e0d6f87fd6 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -253,6 +253,13 @@ class Statement(object): .. versionadded:: 2.1.3 """ + table = None + """ + The string name of the table this query acts on. This is used when the tablet + experimental feature is enabled and, at the same time, :class:`~.TokenAwarePolicy` + is configured in the profile load balancing policy. + """ + custom_payload = None """ :ref:`custom_payload` to be passed to the server.
@@ -272,7 +279,7 @@ class Statement(object): def __init__(self, retry_policy=None, consistency_level=None, routing_key=None, serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET, keyspace=None, custom_payload=None, - is_idempotent=False): + is_idempotent=False, table=None): if retry_policy and not hasattr(retry_policy, 'on_read_timeout'): # just checking one method to detect positional parameter errors raise ValueError('retry_policy should implement cassandra.policies.RetryPolicy') if retry_policy is not None: @@ -286,6 +293,8 @@ def __init__(self, retry_policy=None, consistency_level=None, routing_key=None, self.fetch_size = fetch_size if keyspace is not None: self.keyspace = keyspace + if table is not None: + self.table = table if custom_payload is not None: self.custom_payload = custom_payload self.is_idempotent = is_idempotent @@ -548,6 +557,7 @@ def __init__(self, prepared_statement, retry_policy=None, consistency_level=None meta = prepared_statement.column_metadata if meta: self.keyspace = meta[0].keyspace_name + self.table = meta[0].table_name Statement.__init__(self, retry_policy, consistency_level, routing_key, serial_consistency_level, fetch_size, keyspace, custom_payload, diff --git a/cassandra/tablets.py b/cassandra/tablets.py new file mode 100644 index 0000000000..aeba7fa8ad --- /dev/null +++ b/cassandra/tablets.py @@ -0,0 +1,107 @@ +# Experimental, this interface and use may change +from threading import Lock + +class Tablet(object): + """ + Represents a single ScyllaDB tablet. + It stores information about each replica, its host and shard, + and the token interval in the format (first_token, last_token]. + """ + first_token = 0 + last_token = 0 + replicas = None + + def __init__(self, first_token = 0, last_token = 0, replicas = None): + self.first_token = first_token + self.last_token = last_token + self.replicas = replicas + + def __str__(self): + return "<Tablet: first_token=%s last_token=%s replicas=%s>" \ + % (self.first_token, self.last_token, self.replicas) + __repr__ = __str__ + + @staticmethod + def _is_valid_tablet(replicas): + return replicas is not None and len(replicas) != 0 + + @staticmethod + def from_row(first_token, last_token, replicas): + if Tablet._is_valid_tablet(replicas): + tablet = Tablet(first_token, last_token, replicas) + return tablet + return None + +# Experimental, this interface and use may change +class Tablets(object): + _lock = None + _tablets = {} + + def __init__(self, tablets): + self._tablets = tablets + self._lock = Lock() + + def get_tablet_for_key(self, keyspace, table, t): + tablet = self._tablets.get((keyspace, table), []) + if tablet == []: + return None + + id = bisect_left(tablet, t.value, key = lambda tablet: tablet.last_token) + if id < len(tablet) and t.value > tablet[id].first_token: + return tablet[id] + return None + + def add_tablet(self, keyspace, table, tablet): + with self._lock: + tablets_for_table = self._tablets.setdefault((keyspace, table), []) + + # find first overlapping range + start = bisect_left(tablets_for_table, tablet.first_token, key = lambda t: t.first_token) + if start > 0 and tablets_for_table[start - 1].last_token > tablet.first_token: + start = start - 1 + + # find last overlapping range + end = bisect_left(tablets_for_table, tablet.last_token, key = lambda t: t.last_token) + if end < len(tablets_for_table) and tablets_for_table[end].first_token >= tablet.last_token: + end = end - 1 + + if start <= end: + del tablets_for_table[start:end + 1] + + tablets_for_table.insert(start, tablet) + +# bisect.bisect_left implementation from Python 3.11, needed
until support for +# Python < 3.10 is dropped; the `key` argument is needed to extract last_token from the +# Tablet list - a better solution performance-wise than materializing a list of last_tokens +def bisect_left(a, x, lo=0, hi=None, *, key=None): + """Return the index where to insert item x in list a, assuming a is sorted. + + The return value i is such that all e in a[:i] have e < x, and all e in + a[i:] have e >= x. So if x already appears in the list, a.insert(i, x) will + insert just before the leftmost x already there. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + # Note, the comparison uses "<" to match the + # __lt__() logic in list.sort() and in heapq. + if key is None: + while lo < hi: + mid = (lo + hi) // 2 + if a[mid] < x: + lo = mid + 1 + else: + hi = mid + else: + while lo < hi: + mid = (lo + hi) // 2 + if key(a[mid]) < x: + lo = mid + 1 + else: + hi = mid + return lo diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index a6c63dcfdc..d9ff59fd7a 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -526,6 +526,7 @@ class TokenAwarePolicyTest(unittest.TestCase): def test_wrap_round_robin(self): cluster = Mock(spec=Cluster) cluster.metadata = Mock(spec=Metadata) + cluster.control_connection._tablets_routing_v1 = False hosts = [Host(DefaultEndPoint(str(i)), SimpleConvictionPolicy) for i in range(4)] for host in hosts: host.set_up() @@ -557,6 +558,7 @@ def get_replicas(keyspace, packed_key): def test_wrap_dc_aware(self): cluster = Mock(spec=Cluster) cluster.metadata = Mock(spec=Metadata) + cluster.control_connection._tablets_routing_v1 = False hosts = [Host(DefaultEndPoint(str(i)), SimpleConvictionPolicy) for i in range(4)] for host in hosts: host.set_up() @@ -685,6 +687,7 @@ def test_statement_keyspace(self): cluster = Mock(spec=Cluster) cluster.metadata = Mock(spec=Metadata) + cluster.control_connection._tablets_routing_v1 = False replicas = hosts[2:] cluster.metadata.get_replicas.return_value = replicas @@ -775,6 +778,7 @@ def _assert_shuffle(self, patched_shuffle, keyspace, routing_key): cluster = Mock(spec=Cluster) cluster.metadata = Mock(spec=Metadata) + cluster.control_connection._tablets_routing_v1 = False replicas = hosts[2:] cluster.metadata.get_replicas.return_value = replicas @@ -1448,6 +1452,7 @@ def test_query_plan_deferred_to_child(self): def test_wrap_token_aware(self): cluster = Mock(spec=Cluster) + cluster.control_connection._tablets_routing_v1 = False hosts = [Host(DefaultEndPoint("127.0.0.{}".format(i)), SimpleConvictionPolicy) for i in range(1, 6)] for host in hosts: host.set_up() diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 4e212a0355..29cddec7a8 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -75,7 +75,7 @@ def test_result_message(self): rf.send_request() rf.session._pools.get.assert_called_once_with('ip1')
pool.borrow_connection.assert_called_once_with(timeout=ANY, routing_key=ANY) + pool.borrow_connection.assert_called_once_with(timeout=ANY, routing_key=ANY, keyspace=ANY, table=ANY) connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=[]) result = Mock(spec=UnavailableErrorMessage, info={}) @@ -276,7 +276,7 @@ def test_retry_policy_says_retry(self): # it should try again with the same host since this was # an UnavailableException rf.session._pools.get.assert_called_with(host) - pool.borrow_connection.assert_called_with(timeout=ANY, routing_key=ANY) + pool.borrow_connection.assert_called_with(timeout=ANY, routing_key=ANY, keyspace=ANY, table=ANY) connection.send_msg.assert_called_with(rf.message, 2, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=[]) def test_retry_with_different_host(self): @@ -291,7 +291,7 @@ def test_retry_with_different_host(self): rf.send_request() rf.session._pools.get.assert_called_once_with('ip1') - pool.borrow_connection.assert_called_once_with(timeout=ANY, routing_key=ANY) + pool.borrow_connection.assert_called_once_with(timeout=ANY, routing_key=ANY, keyspace=ANY, table=ANY) connection.send_msg.assert_called_once_with(rf.message, 1, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=[]) self.assertEqual(ConsistencyLevel.QUORUM, rf.message.consistency_level) @@ -310,7 +310,7 @@ def test_retry_with_different_host(self): # it should try with a different host rf.session._pools.get.assert_called_with('ip2') - pool.borrow_connection.assert_called_with(timeout=ANY, routing_key=ANY) + pool.borrow_connection.assert_called_with(timeout=ANY, routing_key=ANY, keyspace=ANY, table=ANY) connection.send_msg.assert_called_with(rf.message, 2, cb=ANY, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=[]) # the consistency level should be the same From c3f194b4508b82a8d4e46ddbec5008d5b0d05c2f Mon Sep 17 00:00:00 2001 From: sylwiaszunejko Date: Tue, 1 Aug 2023 09:27:46 +0200 Subject: [PATCH 214/551] Add integration and unit tests --- .github/workflows/integration-tests.yml | 7 + ci/run_integration_test.sh | 5 +- tests/integration/__init__.py | 10 +- tests/integration/experiments/test_tablets.py | 156 ++++++++++++++++++ tests/unit/test_policies.py | 3 +- tests/unit/test_response_future.py | 1 + tests/unit/test_tablets.py | 88 ++++++++++ 7 files changed, 262 insertions(+), 8 deletions(-) create mode 100644 tests/integration/experiments/test_tablets.py create mode 100644 tests/unit/test_tablets.py diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index a8ee628a8d..d263b52057 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -32,4 +32,11 @@ jobs: - name: Test with pytest run: | export EVENT_LOOP_MANAGER=${{ matrix.event_loop_manager }} + export SCYLLA_VERSION='release:5.1' ./ci/run_integration_test.sh tests/integration/standard/ tests/integration/cqlengine/ + + - name: Test tablets + run: | + export EVENT_LOOP_MANAGER=${{ matrix.event_loop_manager }} + export SCYLLA_VERSION='unstable/master:2024-01-03T08:06:57Z' + ./ci/run_integration_test.sh tests/integration/experiments/ diff --git a/ci/run_integration_test.sh b/ci/run_integration_test.sh index b064b45399..2796a33e61 100755 --- a/ci/run_integration_test.sh +++ 
b/ci/run_integration_test.sh @@ -15,8 +15,6 @@ if (( aio_max_nr != aio_max_nr_recommended_value )); then fi fi -SCYLLA_RELEASE='release:5.1' - python3 -m venv .test-venv source .test-venv/bin/activate pip install -U pip wheel setuptools @@ -33,12 +31,11 @@ pip install https://github.com/scylladb/scylla-ccm/archive/master.zip # download version -ccm create scylla-driver-temp -n 1 --scylla --version ${SCYLLA_RELEASE} +ccm create scylla-driver-temp -n 1 --scylla --version ${SCYLLA_VERSION} ccm remove # run test -export SCYLLA_VERSION=${SCYLLA_RELEASE} export MAPPED_SCYLLA_VERSION=3.11.4 PROTOCOL_VERSION=4 pytest -rf --import-mode append $* diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index e728bc7740..52e8b5dad4 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -372,7 +372,8 @@ def _id_and_mark(f): # 1. unittest doesn't skip setUpClass when used on class and we need it sometimes # 2. unittest doesn't have conditional xfail, and I prefer to use pytest than custom decorator # 3. unittest doesn't have a reason argument, so you don't see the reason in pytest report -requires_collection_indexes = pytest.mark.skipif(SCYLLA_VERSION is not None and Version(SCYLLA_VERSION.split(':')[1]) < Version('5.2'), +# TODO remove second check when we stop using unstable version in CI for tablets +requires_collection_indexes = pytest.mark.skipif(SCYLLA_VERSION is not None and (len(SCYLLA_VERSION.split('/')) != 0 or Version(SCYLLA_VERSION.split(':')[1]) < Version('5.2')), reason='Scylla supports collection indexes from 5.2 onwards') requires_custom_indexes = pytest.mark.skipif(SCYLLA_VERSION is not None, reason='Scylla does not support SASI or any other CUSTOM INDEX class') @@ -501,7 +502,7 @@ def start_cluster_wait_for_up(cluster): def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, set_keyspace=True, ccm_options=None, - configuration_options=None, dse_options=None, use_single_interface=USE_SINGLE_INTERFACE): + configuration_options=None, dse_options=None, use_single_interface=USE_SINGLE_INTERFACE, use_tablets=False): configuration_options = configuration_options or {} dse_options = dse_options or {} workloads = workloads or [] @@ -611,7 +612,10 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, # CDC is causing an issue (can't start cluster with multiple seeds) # Selecting only features we need for tests, i.e. anything but CDC. 
CCM_CLUSTER = CCMScyllaCluster(path, cluster_name, **ccm_options) - CCM_CLUSTER.set_configuration_options({'experimental_features': ['lwt', 'udf'], 'start_native_transport': True}) + if use_tablets: + CCM_CLUSTER.set_configuration_options({'experimental_features': ['lwt', 'udf', 'consistent-topology-changes', 'tablets'], 'start_native_transport': True}) + else: + CCM_CLUSTER.set_configuration_options({'experimental_features': ['lwt', 'udf'], 'start_native_transport': True}) # Permit IS NOT NULL restriction on non-primary key columns of a materialized view # This allows `test_metadata_with_quoted_identifiers` to run diff --git a/tests/integration/experiments/test_tablets.py b/tests/integration/experiments/test_tablets.py new file mode 100644 index 0000000000..c9e5c3ea3c --- /dev/null +++ b/tests/integration/experiments/test_tablets.py @@ -0,0 +1,156 @@ +import time +import unittest +import pytest +import os +from cassandra.cluster import Cluster +from cassandra.policies import ConstantReconnectionPolicy, RoundRobinPolicy, TokenAwarePolicy + +from tests.integration import PROTOCOL_VERSION, use_cluster +from tests.unit.test_host_connection_pool import LOGGER + +def setup_module(): + use_cluster('tablets', [3], start=True, use_tablets=True) + +class TestTabletsIntegration(unittest.TestCase): + @classmethod + def setup_class(cls): + cls.cluster = Cluster(contact_points=["127.0.0.1", "127.0.0.2", "127.0.0.3"], protocol_version=PROTOCOL_VERSION, + load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()), + reconnection_policy=ConstantReconnectionPolicy(1)) + cls.session = cls.cluster.connect() + cls.create_ks_and_cf(cls) + cls.create_data(cls.session) + + @classmethod + def teardown_class(cls): + cls.cluster.shutdown() + + def verify_same_host_in_tracing(self, results): + traces = results.get_query_trace() + events = traces.events + host_set = set() + for event in events: + LOGGER.info("TRACE EVENT: %s %s %s", event.source, event.thread_name, event.description) + host_set.add(event.source) + + self.assertEqual(len(host_set), 1) + self.assertIn('locally', "\n".join([event.description for event in events])) + + trace_id = results.response_future.get_query_trace_ids()[0] + traces = self.session.execute("SELECT * FROM system_traces.events WHERE session_id = %s", (trace_id,)) + events = [event for event in traces] + host_set = set() + for event in events: + LOGGER.info("TRACE EVENT: %s %s", event.source, event.activity) + host_set.add(event.source) + + self.assertEqual(len(host_set), 1) + self.assertIn('locally', "\n".join([event.activity for event in events])) + + def verify_same_shard_in_tracing(self, results): + traces = results.get_query_trace() + events = traces.events + shard_set = set() + for event in events: + LOGGER.info("TRACE EVENT: %s %s %s", event.source, event.thread_name, event.description) + shard_set.add(event.thread_name) + + self.assertEqual(len(shard_set), 1) + self.assertIn('locally', "\n".join([event.description for event in events])) + + trace_id = results.response_future.get_query_trace_ids()[0] + traces = self.session.execute("SELECT * FROM system_traces.events WHERE session_id = %s", (trace_id,)) + events = [event for event in traces] + shard_set = set() + for event in events: + LOGGER.info("TRACE EVENT: %s %s", event.thread, event.activity) + shard_set.add(event.thread) + + self.assertEqual(len(shard_set), 1) + self.assertIn('locally', "\n".join([event.activity for event in events])) + + def create_ks_and_cf(self): + self.session.execute( + """ + DROP KEYSPACE IF EXISTS 
test1 + """ + ) + self.session.execute( + """ + CREATE KEYSPACE test1 + WITH replication = { + 'class': 'NetworkTopologyStrategy', + 'replication_factor': 1, + 'initial_tablets': 8 + } + """) + + self.session.execute( + """ + CREATE TABLE test1.table1 (pk int, ck int, v int, PRIMARY KEY (pk, ck)); + """) + + @staticmethod + def create_data(session): + prepared = session.prepare( + """ + INSERT INTO test1.table1 (pk, ck, v) VALUES (?, ?, ?) + """) + + for i in range(50): + bound = prepared.bind((i, i%5, i%2)) + session.execute(bound) + + def query_data_shard_select(self, session, verify_in_tracing=True): + prepared = session.prepare( + """ + SELECT pk, ck, v FROM test1.table1 WHERE pk = ? + """) + + bound = prepared.bind([(2)]) + results = session.execute(bound, trace=True) + self.assertEqual(results, [(2, 2, 0)]) + if verify_in_tracing: + self.verify_same_shard_in_tracing(results) + + def query_data_host_select(self, session, verify_in_tracing=True): + prepared = session.prepare( + """ + SELECT pk, ck, v FROM test1.table1 WHERE pk = ? + """) + + bound = prepared.bind([(2)]) + results = session.execute(bound, trace=True) + self.assertEqual(results, [(2, 2, 0)]) + if verify_in_tracing: + self.verify_same_host_in_tracing(results) + + def query_data_shard_insert(self, session, verify_in_tracing=True): + prepared = session.prepare( + """ + INSERT INTO test1.table1 (pk, ck, v) VALUES (?, ?, ?) + """) + + bound = prepared.bind([(51), (1), (2)]) + results = session.execute(bound, trace=True) + if verify_in_tracing: + self.verify_same_shard_in_tracing(results) + + def query_data_host_insert(self, session, verify_in_tracing=True): + prepared = session.prepare( + """ + INSERT INTO test1.table1 (pk, ck, v) VALUES (?, ?, ?) + """) + + bound = prepared.bind([(52), (1), (2)]) + results = session.execute(bound, trace=True) + if verify_in_tracing: + self.verify_same_host_in_tracing(results) + + def test_tablets(self): + self.query_data_host_select(self.session) + self.query_data_host_insert(self.session) + + def test_tablets_shard_awareness(self): + self.query_data_shard_select(self.session) + self.query_data_shard_insert(self.session) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index d9ff59fd7a..e60940afac 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -24,7 +24,7 @@ from threading import Thread from cassandra import ConsistencyLevel -from cassandra.cluster import Cluster +from cassandra.cluster import Cluster, ControlConnection from cassandra.metadata import Metadata from cassandra.policies import (RoundRobinPolicy, WhiteListRoundRobinPolicy, DCAwareRoundRobinPolicy, TokenAwarePolicy, SimpleConvictionPolicy, @@ -601,6 +601,7 @@ def get_replicas(keyspace, packed_key): class FakeCluster: def __init__(self): self.metadata = Mock(spec=Metadata) + self.control_connection = Mock(spec=ControlConnection) def test_get_distance(self): """ diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 29cddec7a8..d1a7ce4a9f 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -40,6 +40,7 @@ class ResponseFutureTests(unittest.TestCase): def make_basic_session(self): s = Mock(spec=Session) s.row_factory = lambda col_names, rows: [(col_names, rows)] + s.cluster.control_connection._tablets_routing_v1 = False return s def make_pool(self): diff --git a/tests/unit/test_tablets.py b/tests/unit/test_tablets.py new file mode 100644 index 0000000000..3bbba06918 --- /dev/null +++ 
b/tests/unit/test_tablets.py @@ -0,0 +1,88 @@ +import unittest + +from cassandra.tablets import Tablets, Tablet + +class TabletsTest(unittest.TestCase): + def compare_ranges(self, tablets, ranges): + self.assertEqual(len(tablets), len(ranges)) + + for idx, tablet in enumerate(tablets): + self.assertEqual(tablet.first_token, ranges[idx][0], "First token is not correct in tablet: {}".format(tablet)) + self.assertEqual(tablet.last_token, ranges[idx][1], "Last token is not correct in tablet: {}".format(tablet)) + + def test_add_tablet_to_empty_tablets(self): + tablets = Tablets({("test_ks", "test_tb"): []}) + + tablets.add_tablet("test_ks", "test_tb", Tablet(-6917529027641081857, -4611686018427387905, None)) + + tablets_list = tablets._tablets.get(("test_ks", "test_tb")) + + self.compare_ranges(tablets_list, [(-6917529027641081857, -4611686018427387905)]) + + def test_add_tablet_at_the_beggining(self): + tablets = Tablets({("test_ks", "test_tb"): [Tablet(-6917529027641081857, -4611686018427387905, None)]}) + + tablets.add_tablet("test_ks", "test_tb", Tablet(-8611686018427387905, -7917529027641081857, None)) + + tablets_list = tablets._tablets.get(("test_ks", "test_tb")) + + self.compare_ranges(tablets_list, [(-8611686018427387905, -7917529027641081857), + (-6917529027641081857, -4611686018427387905)]) + + def test_add_tablet_at_the_end(self): + tablets = Tablets({("test_ks", "test_tb"): [Tablet(-6917529027641081857, -4611686018427387905, None)]}) + + tablets.add_tablet("test_ks", "test_tb", Tablet(-1, 2305843009213693951, None)) + + tablets_list = tablets._tablets.get(("test_ks", "test_tb")) + + self.compare_ranges(tablets_list, [(-6917529027641081857, -4611686018427387905), + (-1, 2305843009213693951)]) + + def test_add_tablet_in_the_middle(self): + tablets = Tablets({("test_ks", "test_tb"): [Tablet(-6917529027641081857, -4611686018427387905, None), + Tablet(-1, 2305843009213693951, None)]},) + + tablets.add_tablet("test_ks", "test_tb", Tablet(-4611686018427387905, -2305843009213693953, None)) + + tablets_list = tablets._tablets.get(("test_ks", "test_tb")) + + self.compare_ranges(tablets_list, [(-6917529027641081857, -4611686018427387905), + (-4611686018427387905, -2305843009213693953), + (-1, 2305843009213693951)]) + + def test_add_tablet_intersecting(self): + tablets = Tablets({("test_ks", "test_tb"): [Tablet(-6917529027641081857, -4611686018427387905, None), + Tablet(-4611686018427387905, -2305843009213693953, None), + Tablet(-2305843009213693953, -1, None), + Tablet(-1, 2305843009213693951, None)]}) + + tablets.add_tablet("test_ks", "test_tb", Tablet(-3611686018427387905, -6, None)) + + tablets_list = tablets._tablets.get(("test_ks", "test_tb")) + + self.compare_ranges(tablets_list, [(-6917529027641081857, -4611686018427387905), + (-3611686018427387905, -6), + (-1, 2305843009213693951)]) + + def test_add_tablet_intersecting_with_first(self): + tablets = Tablets({("test_ks", "test_tb"): [Tablet(-8611686018427387905, -7917529027641081857, None), + Tablet(-6917529027641081857, -4611686018427387905, None)]}) + + tablets.add_tablet("test_ks", "test_tb", Tablet(-8011686018427387905, -7987529027641081857, None)) + + tablets_list = tablets._tablets.get(("test_ks", "test_tb")) + + self.compare_ranges(tablets_list, [(-8011686018427387905, -7987529027641081857), + (-6917529027641081857, -4611686018427387905)]) + + def test_add_tablet_intersecting_with_last(self): + tablets = Tablets({("test_ks", "test_tb"): [Tablet(-8611686018427387905, -7917529027641081857, None), + Tablet(-6917529027641081857, 
-4611686018427387905, None)]}) + + tablets.add_tablet("test_ks", "test_tb", Tablet(-5011686018427387905, -2987529027641081857, None)) + + tablets_list = tablets._tablets.get(("test_ks", "test_tb")) + + self.compare_ranges(tablets_list, [(-8611686018427387905, -7917529027641081857), + (-5011686018427387905, -2987529027641081857)]) From eaa9eb1f9d2ffbc8e0d007643013091e0301c902 Mon Sep 17 00:00:00 2001 From: Sylwia Szunejko Date: Thu, 11 Jan 2024 18:36:02 +0100 Subject: [PATCH 215/551] Add documentation of tablet awareness --- README.rst | 1 + docs/scylla-specific.rst | 13 +++++++++++++ 2 files changed, 14 insertions(+) diff --git a/README.rst b/README.rst index b1833a8fc5..2a3dc73f33 100644 --- a/README.rst +++ b/README.rst @@ -26,6 +26,7 @@ Features * `Concurrent execution utilities `_ * `Object mapper `_ * `Shard awareness `_ +* `Tablet awareness `_ Installation ------------ diff --git a/docs/scylla-specific.rst b/docs/scylla-specific.rst index f830235088..87fcf01aa3 100644 --- a/docs/scylla-specific.rst +++ b/docs/scylla-specific.rst @@ -109,3 +109,16 @@ New Error Types self.session.execute(prepared.bind((123, 456))) except RateLimitReached: raise + + +Tablet Awareness +---------------- + +**scylla-driver** is tablet aware, which mean that it is able to parse `TABLETS_ROUTING_V1` extension to ProtocolFeatures, recieve tablet information send by Scylla in `custom_payload` part of `RESULT` message, and utilize it. +Thanks to that queries to tablet based tables are still shard aware. + +Details on the scylla cql protocol extensions +https://github.com/scylladb/scylladb/blob/master/docs/dev/protocol-extensions.md#negotiate-sending-tablets-info-to-the-drivers + +Details on the sending tablet information to the drivers +https://github.com/scylladb/scylladb/blob/master/docs/dev/protocol-extensions.md#sending-tablet-info-to-the-drivers From d6149e3f1836079629d92903b71f527f85ca6fde Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Jan 2024 07:53:36 +0000 Subject: [PATCH 216/551] build(deps): bump gevent from 20.5.0 to 23.9.0 Bumps [gevent](https://github.com/gevent/gevent) from 20.5.0 to 23.9.0. - [Release notes](https://github.com/gevent/gevent/releases) - [Changelog](https://github.com/gevent/gevent/blob/master/docs/changelog_pre.rst) - [Commits](https://github.com/gevent/gevent/compare/20.5.0...23.9.0) --- updated-dependencies: - dependency-name: gevent dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 6015aad6b0..fa6afd6711 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -8,7 +8,7 @@ pure-sasl twisted[tls]; python_version >= '3.5' twisted[tls]==19.2.1; python_version < '3.5' gevent>=1.0; platform_machine != 'i686' and platform_machine != 'win32' -gevent==20.5.0; platform_machine == 'i686' or platform_machine == 'win32' +gevent==23.9.0; platform_machine == 'i686' or platform_machine == 'win32' eventlet>=0.33.3 cython packaging From 5dfb81bedd01e28db2c84f31d7a89dc3237874c1 Mon Sep 17 00:00:00 2001 From: Sylwia Szunejko Date: Mon, 15 Jan 2024 19:49:31 +0100 Subject: [PATCH 217/551] Add unit test for unix domain sockets support in WhiteListRoundRobinPolicy --- tests/unit/test_policies.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index a6c63dcfdc..3ed4d484ac 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -34,7 +34,7 @@ LoadBalancingPolicy, ConvictionPolicy, ReconnectionPolicy, FallthroughRetryPolicy, IdentityTranslator, EC2MultiRegionTranslator, HostFilterPolicy) from cassandra.pool import Host -from cassandra.connection import DefaultEndPoint +from cassandra.connection import DefaultEndPoint, UnixSocketEndPoint from cassandra.query import Statement from six.moves import xrange @@ -1254,6 +1254,17 @@ def test_hosts_with_hostname(self): self.assertEqual(sorted(qplan), [host]) self.assertEqual(policy.distance(host), HostDistance.LOCAL) + + def test_hosts_with_socket_hostname(self): + hosts = [UnixSocketEndPoint('/tmp/scylla-workdir/cql.m')] + policy = WhiteListRoundRobinPolicy(hosts) + host = Host(UnixSocketEndPoint('/tmp/scylla-workdir/cql.m'), SimpleConvictionPolicy) + policy.populate(None, [host]) + + qplan = list(policy.make_query_plan()) + self.assertEqual(sorted(qplan), [host]) + + self.assertEqual(policy.distance(host), HostDistance.LOCAL) class AddressTranslatorTest(unittest.TestCase): From 810291faf355c10f412c44627ada89f40464be1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Wed, 17 Jan 2024 13:11:40 +0100 Subject: [PATCH 218/551] Release 3.26.5 --- cassandra/__init__.py | 2 +- docs/conf.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 53a0cad5e7..ac9722681a 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 26, 4) +__version_info__ = (3, 26, 5) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/conf.py b/docs/conf.py index b8fc66275e..6bf5382c0a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -10,10 +10,10 @@ # -- Global variables # Build documentation for the following tags and branches -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.4-scylla'] +TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.5-scylla'] BRANCHES = ['master'] # Set the latest version. -LATEST_VERSION = '3.26.4-scylla' +LATEST_VERSION = '3.26.5-scylla' # Set which versions are not released yet. 
UNSTABLE_VERSIONS = ['master'] # Set which versions are deprecated From 6b01e490bb697823476d7f5be861b85c665309c0 Mon Sep 17 00:00:00 2001 From: Sylwia Szunejko Date: Wed, 17 Jan 2024 18:20:13 +0100 Subject: [PATCH 219/551] Update CI to use new way of initializing keyspace with tablets --- .github/workflows/integration-tests.yml | 2 +- tests/integration/experiments/test_tablets.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index d263b52057..8c364e93a1 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -38,5 +38,5 @@ jobs: - name: Test tablets run: | export EVENT_LOOP_MANAGER=${{ matrix.event_loop_manager }} - export SCYLLA_VERSION='unstable/master:2024-01-03T08:06:57Z' + export SCYLLA_VERSION='unstable/master:2024-01-17T17:56:00Z' ./ci/run_integration_test.sh tests/integration/experiments/ diff --git a/tests/integration/experiments/test_tablets.py b/tests/integration/experiments/test_tablets.py index c9e5c3ea3c..5b146f6ebd 100644 --- a/tests/integration/experiments/test_tablets.py +++ b/tests/integration/experiments/test_tablets.py @@ -80,8 +80,9 @@ def create_ks_and_cf(self): CREATE KEYSPACE test1 WITH replication = { 'class': 'NetworkTopologyStrategy', - 'replication_factor': 1, - 'initial_tablets': 8 + 'replication_factor': 1 + } AND tablets = { + 'initial': 8 } """) From 2a09d976fb1c0349ba61bff572c26b6e9bea2f71 Mon Sep 17 00:00:00 2001 From: Kefu Chai Date: Thu, 18 Jan 2024 13:46:35 +0800 Subject: [PATCH 220/551] docs: fix minor syntax issues For instance, s/send/sent/ to use the past-tense form of the verb, agreeing with the sentence's structure; and other trivial changes. Signed-off-by: Kefu Chai --- docs/scylla-specific.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/scylla-specific.rst b/docs/scylla-specific.rst index 87fcf01aa3..e9caaa8793 100644 --- a/docs/scylla-specific.rst +++ b/docs/scylla-specific.rst @@ -114,8 +114,8 @@ New Error Types Tablet Awareness ---------------- -**scylla-driver** is tablet aware, which mean that it is able to parse `TABLETS_ROUTING_V1` extension to ProtocolFeatures, recieve tablet information send by Scylla in `custom_payload` part of `RESULT` message, and utilize it. -Thanks to that queries to tablet based tables are still shard aware. +**scylla-driver** is tablet-aware, which means that it is able to parse `TABLETS_ROUTING_V1` extension to ProtocolFeatures, recieve tablet information sent by Scylla in the `custom_payload` part of the `RESULT` message, and utilize it. +Thanks to this, queries to tablet-based tables are still shard-aware. Details on the scylla cql protocol extensions https://github.com/scylladb/scylladb/blob/master/docs/dev/protocol-extensions.md#negotiate-sending-tablets-info-to-the-drivers From 7982b71d23ebc224970578b8a3d436900e083088 Mon Sep 17 00:00:00 2001 From: Sylwia Szunejko Date: Thu, 18 Jan 2024 10:08:15 +0100 Subject: [PATCH 221/551] Add setting connected_event flag in libevreactor Before, the connected_event flag was set in every implementation of Connection except the one utilizing libev. Other reactors have the same `self.error_all_requests(ConnectionShutdown(...))` logic, but they have `self.connected_event.set()` after that, so it was probably an oversight (copy-paste mistake?) that it was missing from this reactor. That was causing the driver to sometimes hang for >3 minutes when shutting down.
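The hang is easy to reproduce in isolation (a minimal sketch with hypothetical names, not the driver's code): a thread blocking on a threading.Event that the closing path never sets can only run out its full timeout.

```python
import threading

connected_event = threading.Event()

def close_without_signal():
    pass  # forgets connected_event.set(); waiters sleep out their whole timeout

def close_with_signal():
    connected_event.set()  # waiters wake up immediately

close_without_signal()
connected_event.wait(3.0)   # blocks for the full 3 seconds
close_with_signal()
connected_event.wait(3.0)   # returns at once
```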
This commit adds setting the `connected_event` flag in `close()` in `LibevConnection`. --- cassandra/io/libevreactor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index 54e2d0de03..f4908f49fb 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -294,6 +294,7 @@ def close(self): if not self.is_defunct: self.error_all_requests( ConnectionShutdown("Connection to %s was closed" % self.endpoint)) + self.connected_event.set() def handle_write(self, watcher, revents, errno=None): if revents & libev.EV_ERROR: From 4fc60e7a1a4433df1b15cd6b645c24391dcd25a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Sun, 28 Jan 2024 21:37:40 +0100 Subject: [PATCH 222/551] Release 3.26.6 --- cassandra/__init__.py | 2 +- docs/conf.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index ac9722681a..d16aa85976 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 26, 5) +__version_info__ = (3, 26, 6) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/conf.py b/docs/conf.py index 6bf5382c0a..3ab0cfa583 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -10,10 +10,10 @@ # -- Global variables # Build documentation for the following tags and branches -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.5-scylla'] +TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.6-scylla'] BRANCHES = ['master'] # Set the latest version. -LATEST_VERSION = '3.26.5-scylla' +LATEST_VERSION = '3.26.6-scylla' # Set which versions are not released yet. UNSTABLE_VERSIONS = ['master'] # Set which versions are deprecated From 67a108ea60e558e3f9345a1de83fc7ceccfc87b8 Mon Sep 17 00:00:00 2001 From: Sylwia Szunejko Date: Tue, 6 Feb 2024 10:38:39 +0100 Subject: [PATCH 223/551] Close pending connections during shutdown MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, if the shutdown occurred in the middle of creating a connection, there was no way to close that connection, resulting in the driver hanging for >3 minutes. This commit introduces a new field in the HostConnection class - _pending_connections - to keep track of connections that are in the middle of being created, along with a mechanism to close these connections if shutdown was executed. Fixes: #262 (this reproducer - https://github.com/kbr-scylla/scylladb/commits/test-pause - doesn’t reproduce with that fix) --- cassandra/cluster.py | 4 ++-- cassandra/connection.py | 6 +++++- cassandra/pool.py | 11 +++++++++-- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 88c7dd6a3c..1f02c2d6d3 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -1691,13 +1691,13 @@ def set_max_connections_per_host(self, host_distance, max_connections): "when using protocol_version 1 or 2.") self._max_connections_per_host[host_distance] = max_connections - def connection_factory(self, endpoint, *args, **kwargs): + def connection_factory(self, endpoint, host_conn = None, *args, **kwargs): """ Called to create a new connection with proper configuration. Intended for internal use only. 
""" kwargs = self._make_connection_kwargs(endpoint, kwargs) - return self.connection_class.factory(endpoint, self.connect_timeout, *args, **kwargs) + return self.connection_class.factory(endpoint, self.connect_timeout, host_conn, *args, **kwargs) def _make_connection_factory(self, host, *args, **kwargs): kwargs = self._make_connection_kwargs(host.endpoint, kwargs) diff --git a/cassandra/connection.py b/cassandra/connection.py index 6007b26a27..754555a0d4 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -865,7 +865,7 @@ def create_timer(cls, timeout, callback): raise NotImplementedError() @classmethod - def factory(cls, endpoint, timeout, *args, **kwargs): + def factory(cls, endpoint, timeout, host_conn = None, *args, **kwargs): """ A factory function which returns connections which have succeeded in connecting and are ready for service (or @@ -874,6 +874,10 @@ def factory(cls, endpoint, timeout, *args, **kwargs): start = time.time() kwargs['connect_timeout'] = timeout conn = cls(endpoint, *args, **kwargs) + if host_conn is not None: + host_conn._pending_connections.append(conn) + if host_conn.is_shutdown: + conn.close() elapsed = time.time() - start conn.connected_event.wait(timeout - elapsed) if conn.last_error: diff --git a/cassandra/pool.py b/cassandra/pool.py index bb176b2ee7..315114575c 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -404,6 +404,7 @@ def __init__(self, host, host_distance, session): self._is_replacing = False self._connecting = set() self._connections = {} + self._pending_connections = [] # A pool of additional connections which are not used but affect how Scylla # assigns shards to them. Scylla tends to assign the shard which has # the lowest number of connections. If connections are not distributed @@ -638,7 +639,9 @@ def shutdown(self): future.cancel() connections_to_close = self._connections.copy() + pending_connections_to_close = self._pending_connections.copy() self._connections.clear() + self._pending_connections.clear() # connection.close can call pool.return_connection, which will # obtain self._lock via self._stream_available_condition. 
@@ -647,6 +650,10 @@ def shutdown(self): log.debug("Closing connection (%s) to %s", id(connection), self.host) connection.close() + for connection in pending_connections_to_close: + log.debug("Closing pending connection (%s) to %s", id(connection), self.host) + connection.close() + self._close_excess_connections() trash_conns = None @@ -714,12 +721,12 @@ def _open_connection_to_missing_shard(self, shard_id): log.debug("shard_aware_endpoint=%r", shard_aware_endpoint) if shard_aware_endpoint: - conn = self._session.cluster.connection_factory(shard_aware_endpoint, on_orphaned_stream_released=self.on_orphaned_stream_released, + conn = self._session.cluster.connection_factory(shard_aware_endpoint, host_conn=self, on_orphaned_stream_released=self.on_orphaned_stream_released, shard_id=shard_id, total_shards=self.host.sharding_info.shards_count) conn.original_endpoint = self.host.endpoint else: - conn = self._session.cluster.connection_factory(self.host.endpoint, on_orphaned_stream_released=self.on_orphaned_stream_released) + conn = self._session.cluster.connection_factory(self.host.endpoint, host_conn=self, on_orphaned_stream_released=self.on_orphaned_stream_released) log.debug("Received a connection %s for shard_id=%i on host %s", id(conn), conn.features.shard_id, self.host) if self.is_shutdown: From 6a88ac43da77cd3705f7655056227fdfeef83bce Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Mon, 25 Dec 2023 18:51:03 +0200 Subject: [PATCH 224/551] tests: fix scylla_version handling test shouldn't assume `SCYLLA_VERSION` is an actual version and should be using ccmlib to read the actual version strings --- tests/integration/__init__.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 52e8b5dad4..f16d32bdf1 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -43,6 +43,7 @@ from cassandra import ProtocolVersion try: + import ccmlib from ccmlib.dse_cluster import DseCluster from ccmlib.cluster import Cluster as CCMCluster from ccmlib.scylla_cluster import ScyllaCluster as CCMScyllaCluster @@ -97,6 +98,12 @@ def get_server_versions(): return (cass_version, cql_version) +def get_scylla_version(scylla_ccm_version_string): + """ get scylla version from ccm before starting a cluster""" + ccm_repo_cache_dir, _ = ccmlib.scylla_repository.setup(version=scylla_ccm_version_string) + return ccmlib.common.get_version_from_build(ccm_repo_cache_dir) + + def _tuple_version(version_string): if '-' in version_string: version_string = version_string[:version_string.index('-')] @@ -372,9 +379,8 @@ def _id_and_mark(f): # 1. unittest doesn't skip setUpClass when used on class and we need it sometimes # 2. unittest doesn't have conditional xfail, and I prefer to use pytest than custom decorator # 3.
unittest doesn't have a reason argument, so you don't see the reason in pytest report -# TODO remove second check when we stop using unstable version in CI for tablets -requires_collection_indexes = pytest.mark.skipif(SCYLLA_VERSION is not None and (len(SCYLLA_VERSION.split('/')) != 0 or Version(SCYLLA_VERSION.split(':')[1]) < Version('5.2')), - reason='Scylla supports collection indexes from 5.2 onwards') +requires_collection_indexes = pytest.mark.skipif(SCYLLA_VERSION is not None and Version(get_scylla_version(SCYLLA_VERSION)) < Version('5.2'), - reason='Scylla supports collection indexes from 5.2 onwards') requires_custom_indexes = pytest.mark.skipif(SCYLLA_VERSION is not None, reason='Scylla does not support SASI or any other CUSTOM INDEX class') requires_java_udf = pytest.mark.skipif(SCYLLA_VERSION is not None, From 3de6a36d3a6de4bdc03d881820a7fff3094b7c2f Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 15 Feb 2024 09:04:10 +0200 Subject: [PATCH 225/551] asyncioreactor: initial background_tasks set earlier in b80960f9 we introduce this new set, but initialize it after starting the coroutines, which can lead to cases where it won't yet be defined. moving it to the start of the `__init__` method fixes the issue --- cassandra/io/asyncioreactor.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cassandra/io/asyncioreactor.py b/cassandra/io/asyncioreactor.py index 4876b5be1e..4cf3f16d40 100644 --- a/cassandra/io/asyncioreactor.py +++ b/cassandra/io/asyncioreactor.py @@ -87,6 +87,7 @@ class AsyncioConnection(Connection): def __init__(self, *args, **kwargs): Connection.__init__(self, *args, **kwargs) + self._background_tasks = set() self._connect_socket() self._socket.setblocking(0) @@ -106,7 +107,7 @@ def __init__(self, *args, **kwargs): ) self._send_options_message() - self._background_tasks = set() + @classmethod def initialize_reactor(cls): From f4eabdfc97ef6296f6afdfea032b28c655368c2f Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Sun, 11 Feb 2024 20:08:49 +0200 Subject: [PATCH 226/551] introducing `ExponentialBackoffRetryPolicy` Adding a new RetryPolicy that can do exponential backoff, modeled similarly to how it works in gocql Fixes: scylladb/python-driver#91 Ref: https://github.com/gocql/gocql/blob/34fdeebefcbf183ed7f916f931aa0586fdaa1b40/policies.go#L156 --- cassandra/cluster.py | 12 +++-- cassandra/policies.py | 53 ++++++++++++++++++++- tests/integration/standard/test_policies.py | 19 +++++++- tests/unit/test_policies.py | 20 +++++++- tests/unit/test_response_future.py | 34 +++++++++++-- 5 files changed, 126 insertions(+), 12 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 1f02c2d6d3..2a4d0d694d 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -5012,12 +5012,16 @@ def exception_from_response(response): return response.to_exception() else: return response + if len(retry_decision) == 2: + retry_type, consistency = retry_decision + delay = 0 + elif len(retry_decision) == 3: + retry_type, consistency, delay = retry_decision - retry_type, consistency = retry_decision if retry_type in (RetryPolicy.RETRY, RetryPolicy.RETRY_NEXT_HOST): self._query_retries += 1 reuse = retry_type == RetryPolicy.RETRY - self._retry(reuse, consistency, host) + self._retry(reuse, consistency, host, delay) elif retry_type is RetryPolicy.RETHROW: self._set_final_exception(exception_from_response(response)) else: # IGNORE @@ -5027,7 +5031,7 @@ def exception_from_response(response): self._errors[host] = exception_from_response(response) - def
_retry(self, reuse_connection, consistency_level, host): + def _retry(self, reuse_connection, consistency_level, host, delay): if self._final_exception: # the connection probably broke while we were waiting # to retry the operation @@ -5039,7 +5043,7 @@ def _retry(self, reuse_connection, consistency_level, host): self.message.consistency_level = consistency_level # don't retry on the event loop thread - self.session.submit(self._retry_task, reuse_connection, host) + self.session.cluster.scheduler.schedule(delay, self._retry_task, reuse_connection, host) def _retry_task(self, reuse_connection, host): if self._final_exception: diff --git a/cassandra/policies.py b/cassandra/policies.py index 0537344be6..6912877454 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import random from itertools import islice, cycle, groupby, repeat import logging from random import randint, shuffle @@ -1019,6 +1019,57 @@ def on_unavailable(self, query, consistency, required_replicas, alive_replicas, return self._pick_consistency(alive_replicas) +class ExponentialBackoffRetryPolicy(RetryPolicy): + """ + A policy that do retries with exponential backoff + """ + + def __init__(self, max_num_retries: float, min_interval: float = 0.1, max_interval: float = 10.0, + *args, **kwargs): + """ + `max_num_retries` counts how many times the operation would be retried, + `min_interval` is the initial time in seconds to wait before first retry + `max_interval` is the maximum time to wait between retries + """ + self.min_interval = min_interval + self.max_num_retries = max_num_retries + self.max_interval = max_interval + super(ExponentialBackoffRetryPolicy).__init__(*args, **kwargs) + + def _calculate_backoff(self, attempt: int): + delay = min(self.max_interval, self.min_interval * 2 ** attempt) + # add some jitter + delay += random.random() * self.min_interval - (self.min_interval / 2) + return delay + + def on_read_timeout(self, query, consistency, required_responses, + received_responses, data_retrieved, retry_num): + if retry_num < self.max_num_retries and received_responses >= required_responses and not data_retrieved: + return self.RETRY, consistency, self._calculate_backoff(retry_num) + else: + return self.RETHROW, None, None + + def on_write_timeout(self, query, consistency, write_type, + required_responses, received_responses, retry_num): + if retry_num < self.max_num_retries and write_type == WriteType.BATCH_LOG: + return self.RETRY, consistency, self._calculate_backoff(retry_num) + else: + return self.RETHROW, None, None + + def on_unavailable(self, query, consistency, required_replicas, + alive_replicas, retry_num): + if retry_num < self.max_num_retries: + return self.RETRY_NEXT_HOST, None, self._calculate_backoff(retry_num) + else: + return self.RETHROW, None, None + + def on_request_error(self, query, consistency, error, retry_num): + if retry_num < self.max_num_retries: + return self.RETRY_NEXT_HOST, None, self._calculate_backoff(retry_num) + else: + return self.RETHROW, None, None + + class AddressTranslator(object): """ Interface for translating cluster-defined endpoints. 
diff --git a/tests/integration/standard/test_policies.py b/tests/integration/standard/test_policies.py index 46e91918ac..a91505fe24 100644 --- a/tests/integration/standard/test_policies.py +++ b/tests/integration/standard/test_policies.py @@ -16,7 +16,7 @@ from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT from cassandra.policies import HostFilterPolicy, RoundRobinPolicy, SimpleConvictionPolicy, \ - WhiteListRoundRobinPolicy + WhiteListRoundRobinPolicy, ExponentialBackoffRetryPolicy from cassandra.pool import Host from cassandra.connection import DefaultEndPoint @@ -90,3 +90,20 @@ def test_only_connects_to_subset(self): queried_hosts.update(response.response_future.attempted_hosts) queried_hosts = set(host.address for host in queried_hosts) self.assertEqual(queried_hosts, only_connect_hosts) + + +class ExponentialRetryPolicyTests(unittest.TestCase): + + def setUp(self): + self.cluster = TestCluster(default_retry_policy=ExponentialBackoffRetryPolicy(max_num_retries=3)) + self.session = self.cluster.connect() + + def tearDown(self): + self.cluster.shutdown() + + def test_exponential_retries(self): + self.session.execute( + """ + CREATE KEYSPACE preparedtests + WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} + """) \ No newline at end of file diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index 8e5fa60936..db9eae6324 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -32,7 +32,7 @@ RetryPolicy, WriteType, DowngradingConsistencyRetryPolicy, ConstantReconnectionPolicy, LoadBalancingPolicy, ConvictionPolicy, ReconnectionPolicy, FallthroughRetryPolicy, - IdentityTranslator, EC2MultiRegionTranslator, HostFilterPolicy) + IdentityTranslator, EC2MultiRegionTranslator, HostFilterPolicy, ExponentialBackoffRetryPolicy) from cassandra.pool import Host from cassandra.connection import DefaultEndPoint, UnixSocketEndPoint from cassandra.query import Statement @@ -1247,6 +1247,24 @@ def test_unavailable(self): self.assertEqual(consistency, ConsistencyLevel.ONE) +class ExponentialRetryPolicyTest(unittest.TestCase): + def test_calculate_backoff(self): + policy = ExponentialBackoffRetryPolicy(max_num_retries=2) + + cases = [ + (0, 0.1), + (1, 2 * 0.1), + (2, (2 * 2) * 0.1), + (3, (2 * 2 * 2) * 0.1), + ] + + for attempts, delay in cases: + for i in range(100): + d = policy._calculate_backoff(attempts) + assert d > delay - (0.1 / 2), f"d={d} attempts={attempts}, delay={delay}" + assert d < delay + (0.1 / 2), f"d={d} attempts={attempts}, delay={delay}" + + class WhiteListRoundRobinPolicyTest(unittest.TestCase): def test_hosts_with_hostname(self): diff --git a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index d1a7ce4a9f..82da9e0049 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -30,7 +30,7 @@ RESULT_KIND_ROWS, RESULT_KIND_SET_KEYSPACE, RESULT_KIND_SCHEMA_CHANGE, RESULT_KIND_PREPARED, ProtocolHandler) -from cassandra.policies import RetryPolicy +from cassandra.policies import RetryPolicy, ExponentialBackoffRetryPolicy from cassandra.pool import NoConnectionsAvailable from cassandra.query import SimpleStatement @@ -265,7 +265,7 @@ def test_retry_policy_says_retry(self): host = Mock() rf._set_result(host, None, None, result) - session.submit.assert_called_once_with(rf._retry_task, True, host) + rf.session.cluster.scheduler.schedule.assert_called_once_with(ANY, rf._retry_task, True, host) self.assertEqual(1, rf._query_retries) connection = 
Mock(spec=Connection) @@ -300,7 +300,7 @@ def test_retry_with_different_host(self): host = Mock() rf._set_result(host, None, None, result) - session.submit.assert_called_once_with(rf._retry_task, False, host) + rf.session.cluster.scheduler.schedule.assert_called_once_with(ANY, rf._retry_task, False, host) # query_retries does get incremented for Overloaded/Bootstrapping errors (since 3.18) self.assertEqual(1, rf._query_retries) @@ -332,7 +332,8 @@ def test_all_retries_fail(self): rf._set_result(host, None, None, result) # simulate the executor running this - session.submit.assert_called_once_with(rf._retry_task, False, host) + rf.session.cluster.scheduler.schedule.assert_called_once_with(ANY, rf._retry_task, False, host) + rf._retry_task(False, host) # it should try with a different host @@ -342,11 +343,34 @@ def test_all_retries_fail(self): rf._set_result(host, None, None, result) # simulate the executor running this - session.submit.assert_called_with(rf._retry_task, False, host) + rf.session.cluster.scheduler.schedule.assert_called_with(ANY, rf._retry_task, False, host) rf._retry_task(False, host) self.assertRaises(NoHostAvailable, rf.result) + def test_exponential_retry_policy_fail(self): + session = self.make_session() + pool = session._pools.get.return_value + connection = Mock(spec=Connection) + pool.borrow_connection.return_value = (connection, 1) + + query = SimpleStatement("SELECT * FROM foo") + message = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE) + rf = ResponseFuture(session, message, query, 1, retry_policy=ExponentialBackoffRetryPolicy(2)) + rf.send_request() + rf.session._pools.get.assert_called_once_with('ip1') + + result = Mock(spec=IsBootstrappingErrorMessage, info={}) + host = Mock() + rf._set_result(host, None, None, result) + + # simulate the executor running this + rf.session.cluster.scheduler.schedule.assert_called_once_with(ANY, rf._retry_task, False, host) + + delay = rf.session.cluster.scheduler.schedule.mock_calls[-1][1][0] + assert delay > 0.05 + rf._retry_task(False, host) + def test_all_pools_shutdown(self): session = self.make_basic_session() session.cluster._default_load_balancing_policy.make_query_plan.return_value = ['ip1', 'ip2'] From 8138dca6427176a4eccb6b872769b16e31769ad7 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 15 Feb 2024 09:10:20 +0200 Subject: [PATCH 227/551] tests: stop using `set_keyspace` in test_can_register_udt_before_connecting this test is using multiple `USE` commands; it seems like we are having the same race conditions with the applying of those, and getting the following error once in a while: ``` > raise self._final_exception E cassandra.InvalidRequest: Error from server: code=2200 [Invalid query] message="Unknown field 'is_cool' in value of user defined type user" ``` in this test we'll use fully qualified table names with the keyspace and not set_keyspace (i.e. the `USE` command) Fixes: #264 --- tests/integration/standard/test_udts.py | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 4c7826fb98..8cd6bc3c1b 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -13,7 +13,6 @@ # limitations under the License.
import unittest - from collections import namedtuple from functools import partial import six @@ -127,17 +126,15 @@ def test_can_register_udt_before_connecting(self): CREATE KEYSPACE udt_test_register_before_connecting WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' } """) - s.set_keyspace("udt_test_register_before_connecting") - s.execute("CREATE TYPE user (age int, name text)") - s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen)") + s.execute("CREATE TYPE udt_test_register_before_connecting.user (age int, name text)") + s.execute("CREATE TABLE udt_test_register_before_connecting.mytable (a int PRIMARY KEY, b frozen)") s.execute(""" CREATE KEYSPACE udt_test_register_before_connecting2 WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' } """) - s.set_keyspace("udt_test_register_before_connecting2") - s.execute("CREATE TYPE user (state text, is_cool boolean)") - s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen)") + s.execute("CREATE TYPE udt_test_register_before_connecting2.user (state text, is_cool boolean)") + s.execute("CREATE TABLE udt_test_register_before_connecting2.mytable (a int PRIMARY KEY, b frozen)") # now that types are defined, shutdown and re-create Cluster c.shutdown() @@ -150,19 +147,18 @@ def test_can_register_udt_before_connecting(self): c.register_user_type("udt_test_register_before_connecting2", "user", User2) s = c.connect(wait_for_all_pools=True) + c.control_connection.wait_for_schema_agreement() - s.set_keyspace("udt_test_register_before_connecting") - s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User1(42, 'bob'))) - result = s.execute("SELECT b FROM mytable WHERE a=0") + s.execute("INSERT INTO udt_test_register_before_connecting.mytable (a, b) VALUES (%s, %s)", (0, User1(42, 'bob'))) + result = s.execute("SELECT b FROM udt_test_register_before_connecting.mytable WHERE a=0") row = result[0] self.assertEqual(42, row.b.age) self.assertEqual('bob', row.b.name) self.assertTrue(type(row.b) is User1) # use the same UDT name in a different keyspace - s.set_keyspace("udt_test_register_before_connecting2") - s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User2('Texas', True))) - result = s.execute("SELECT b FROM mytable WHERE a=0") + s.execute("INSERT INTO udt_test_register_before_connecting2.mytable (a, b) VALUES (%s, %s)", (0, User2('Texas', True))) + result = s.execute("SELECT b FROM udt_test_register_before_connecting2.mytable WHERE a=0") row = result[0] self.assertEqual('Texas', row.b.state) self.assertEqual(True, row.b.is_cool) From c7e6ebbdc7ff0c3c8d24d2a745479da615b0832d Mon Sep 17 00:00:00 2001 From: muzarski Date: Wed, 29 Nov 2023 14:48:57 +0100 Subject: [PATCH 228/551] pool: log error when failed to connect to shard The exception from `HostConnection::_open_connection_to_missing_shard` during connection failure is silently dropped by the callers. This function is submitted to the `ThreadPoolExecutor` which assigns the result of this function to the future (either success or exception). The callers throughout the code ignore the future's result and that is why this exception is ignored. In this commit we add an error log when opening a connection to the specific shard fails. 
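to illustrate the failure mode outside the driver, here is a minimal standalone sketch (the function name is hypothetical, not driver code): an exception raised inside a function submitted to a ThreadPoolExecutor is stored on the returned future and stays invisible unless someone calls future.result().
```py
from concurrent.futures import ThreadPoolExecutor

def open_connection_to_missing_shard():
    # stands in for the real connection attempt
    raise ConnectionError("failed to reach shard")

executor = ThreadPoolExecutor(max_workers=1)
future = executor.submit(open_connection_to_missing_shard)
executor.shutdown(wait=True)
# the ConnectionError is stored on `future`; since no caller ever invokes
# future.result(), it is never re-raised and nothing is logged
```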
--- cassandra/pool.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/cassandra/pool.py b/cassandra/pool.py index 315114575c..738fc8e6d6 100644 --- a/cassandra/pool.py +++ b/cassandra/pool.py @@ -719,12 +719,15 @@ def _open_connection_to_missing_shard(self, shard_id): return shard_aware_endpoint = self._get_shard_aware_endpoint() log.debug("shard_aware_endpoint=%r", shard_aware_endpoint) - if shard_aware_endpoint: - conn = self._session.cluster.connection_factory(shard_aware_endpoint, host_conn=self, on_orphaned_stream_released=self.on_orphaned_stream_released, - shard_id=shard_id, - total_shards=self.host.sharding_info.shards_count) - conn.original_endpoint = self.host.endpoint + try: + conn = self._session.cluster.connection_factory(shard_aware_endpoint, host_conn=self, on_orphaned_stream_released=self.on_orphaned_stream_released, + shard_id=shard_id, + total_shards=self.host.sharding_info.shards_count) + conn.original_endpoint = self.host.endpoint + except Exception as exc: + log.error("Failed to open connection to %s, on shard_id=%i: %s", self.host, shard_id, exc) + raise else: conn = self._session.cluster.connection_factory(self.host.endpoint, host_conn=self, on_orphaned_stream_released=self.on_orphaned_stream_released) From f2a80f1772e8addb6ea6bc5171c63b8854ef0aa7 Mon Sep 17 00:00:00 2001 From: Sylwia Szunejko Date: Thu, 29 Feb 2024 15:48:16 +0100 Subject: [PATCH 229/551] Only add host if endpoint is not already present In d735957, functionality was added to reresolve hostnames when all hosts are unreachable. In such a scenario, the driver will try to save the situation by reresolving the contact points in case it helps. However, if there was no ip address change, this results in the creation of new (duplicate) Hosts (same endpoint, different host_id), which in turn starts new reconnection processes. Those duplicate reconnection processes can make the situation worse when the driver regains connectivity with the cluster. Different Hosts with the same endpoint reconnect at different moments, and this causes hosts to be unreachable at unpredictable moments. This commit introduces checking if the resolved endpoint is already present in Cluster Metadata information. The new host is added only if this condition is not true. Fixes: #295 Refs: scylladb/scylladb#16709, scylladb/scylladb#17353 --- cassandra/cluster.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 2a4d0d694d..19d87b2a58 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -2169,6 +2169,9 @@ def add_host(self, endpoint, datacenter=None, rack=None, signal=True, refresh_no the metadata. Intended for internal use only. """ + with self.metadata._hosts_lock: + if endpoint in self.metadata._host_id_by_endpoint: + return self.metadata._hosts[self.metadata._host_id_by_endpoint[endpoint]], False host, new = self.metadata.add_or_return_host(Host(endpoint, self.conviction_policy_factory, datacenter, rack, host_id=host_id)) if new and signal: log.info("New Cassandra host %r discovered", host) From 3815f534eca5c33a611d70a16fcee38715f4d6a5 Mon Sep 17 00:00:00 2001 From: Piotr Grabowski Date: Fri, 1 Mar 2024 16:45:04 +0100 Subject: [PATCH 230/551] cluster: improve logging of peers row validation Before this change, when the driver received an invalid system.peers row it would log a very general warning: Found an invalid row for peer (127.0.73.5). Ignoring host.
A system.peers row can be invalid for a multitude of reasons and that warning message did not describe the specific reason for the failure. Improve the warning message by adding a specific reason why the row is considered invalid by the driver. The message now also includes the host_id or the entire row (in case the driver received a row without even the basic broadcast_rpc). It might be a bit inelegant to introduce a side effect (logging) to the _is_valid_peer static method, however the alternative solution seemed even worse - adding that code to the already big _refresh_node_list_and_token_map. Fixes #303 --- cassandra/cluster.py | 40 ++++++++++++++++++++++++++++++++++------ 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 19d87b2a58..77ae703597 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -3950,9 +3950,6 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, should_rebuild_token_map = force_token_rebuild or self._cluster.metadata.partitioner is None for row in peers_result: if not self._is_valid_peer(row): - log.warning( - "Found an invalid row for peer (%s). Ignoring host." % - _NodeInfo.get_broadcast_rpc_address(row)) continue endpoint = self._cluster.endpoint_factory.create(row) @@ -4019,9 +4016,40 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, @staticmethod def _is_valid_peer(row): - return bool(_NodeInfo.get_broadcast_rpc_address(row) and row.get("host_id") and - row.get("data_center") and row.get("rack") and - ('tokens' not in row or row.get('tokens'))) + broadcast_rpc = _NodeInfo.get_broadcast_rpc_address(row) + host_id = row.get("host_id") + + if not broadcast_rpc: + log.warning( + "Found an invalid row for peer - missing broadcast_rpc (full row: %s). Ignoring host." % + row) + return False + + if not host_id: + log.warning( + "Found an invalid row for peer - missing host_id (broadcast_rpc: %s). Ignoring host." % + broadcast_rpc) + return False + + if not row.get("data_center"): + log.warning( + "Found an invalid row for peer - missing data_center (broadcast_rpc: %s, host_id: %s). Ignoring host." % + (broadcast_rpc, host_id)) + return False + + if not row.get("rack"): + log.warning( + "Found an invalid row for peer - missing rack (broadcast_rpc: %s, host_id: %s). Ignoring host." % + (broadcast_rpc, host_id)) + return False + + if "tokens" in row and not row.get("tokens"): + log.warning( + "Found an invalid row for peer - tokens is None (broadcast_rpc: %s, host_id: %s). Ignoring host." 
% + (broadcast_rpc, host_id)) + return False + + return True def _update_location_info(self, host, datacenter, rack): if host.datacenter == datacenter and host.rack == rack: From f3567166a2c151a1d8a44e1281b17bd612b2032d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Fri, 1 Mar 2024 20:32:09 +0100 Subject: [PATCH 231/551] Release 3.26.7 --- cassandra/__init__.py | 2 +- docs/conf.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index d16aa85976..6a5a1e517c 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 26, 6) +__version_info__ = (3, 26, 7) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/conf.py b/docs/conf.py index 3ab0cfa583..8f1b53b102 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -10,10 +10,10 @@ # -- Global variables # Build documentation for the following tags and branches -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.6-scylla'] +TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.7-scylla'] BRANCHES = ['master'] # Set the latest version. -LATEST_VERSION = '3.26.6-scylla' +LATEST_VERSION = '3.26.7-scylla' # Set which versions are not released yet. UNSTABLE_VERSIONS = ['master'] # Set which versions are deprecated From 12daf57bf9106da34bb2e53790769310d9898e45 Mon Sep 17 00:00:00 2001 From: Sylwia Szunejko Date: Thu, 7 Mar 2024 13:14:56 +0100 Subject: [PATCH 232/551] Remove endpoint to host_id mapping when removing host by host_id To remove a host not found in peers metadata, remove_host_by_host_id is used. In most cases we want to remove a host that is a duplicate of a host found in peers metadata with the same endpoint but a different host_id. Because of that, the mapping in _host_id_by_endpoint is already overwritten with the new host found in peers metadata, so we don't want to remove it. In case we want to remove a host that does not have a duplicate with a different host_id in peers metadata, we do need to remove the mapping from _host_id_by_endpoint. This commit introduces handling of this case.
Refs: https://github.com/scylladb/scylladb/issues/17662 --- cassandra/cluster.py | 2 +- cassandra/metadata.py | 4 +++- tests/unit/test_control_connection.py | 4 +++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 77ae703597..8ed0647ba9 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -4007,7 +4007,7 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None, if old_host_id not in found_host_ids: should_rebuild_token_map = True log.debug("[control connection] Removing host not found in peers metadata: %r", old_host) - self._cluster.metadata.remove_host_by_host_id(old_host_id) + self._cluster.metadata.remove_host_by_host_id(old_host_id, old_host.endpoint) log.debug("[control connection] Finished fetching ring info") if partitioner and should_rebuild_token_map: diff --git a/cassandra/metadata.py b/cassandra/metadata.py index c2993eaa3f..9ef24b981d 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -344,8 +344,10 @@ def remove_host(self, host): self._host_id_by_endpoint.pop(host.endpoint, False) return bool(self._hosts.pop(host.host_id, False)) - def remove_host_by_host_id(self, host_id): + def remove_host_by_host_id(self, host_id, endpoint=None): with self._hosts_lock: + if endpoint and self._host_id_by_endpoint[endpoint] == host_id: + self._host_id_by_endpoint.pop(endpoint, False) return bool(self._hosts.pop(host_id, False)) def update_host(self, host, old_endpoint): diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index 51ea297724..dc5b37d799 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -88,7 +88,9 @@ def update_host(self, host, old_endpoint): def all_hosts_items(self): return list(self.hosts.items()) - def remove_host_by_host_id(self, host_id): + def remove_host_by_host_id(self, host_id, endpoint=None): + if endpoint and self._host_id_by_endpoint[endpoint] == host_id: + self._host_id_by_endpoint.pop(endpoint, False) self.removed_hosts.append(self.hosts.pop(host_id, False)) return bool(self.hosts.pop(host_id, False)) From 6ec9774346f7a65e5bd79e53ebddb1f9a1730386 Mon Sep 17 00:00:00 2001 From: sylwiaszunejko Date: Mon, 18 Mar 2024 15:55:08 +0100 Subject: [PATCH 233/551] Release 3.26.8 --- cassandra/__init__.py | 2 +- docs/conf.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index 6a5a1e517c..b9ea95ddc3 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 26, 7) +__version_info__ = (3, 26, 8) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/conf.py b/docs/conf.py index 8f1b53b102..466bf9e84a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -10,10 +10,10 @@ # -- Global variables # Build documentation for the following tags and branches -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.7-scylla'] +TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.8-scylla'] BRANCHES = ['master'] # Set the latest version. -LATEST_VERSION = '3.26.7-scylla' +LATEST_VERSION = '3.26.8-scylla' # Set which versions are not released yet. 
UNSTABLE_VERSIONS = ['master'] # Set which versions are deprecated From d7817c2f37f0aa4cceacbe0b80fb66802e1e42f7 Mon Sep 17 00:00:00 2001 From: Nigel Huang <28766663+nigel5@users.noreply.github.com> Date: Wed, 3 Apr 2024 00:42:29 -0400 Subject: [PATCH 234/551] Update index.rst --- docs/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.rst b/docs/index.rst index c21d293b6f..f33819cbd3 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,7 +4,7 @@ A Python client driver for `Scylla `_. This driver works exclusively with the Cassandra Query Language v3 (CQL3) and Cassandra's native protocol. -The driver supports Python 2.7, 3.5, 3.6, 3.7 and 3.8. +The driver supports Python 3.6-3.11. This driver is open source under the `Apache v2 License `_. From 0ab7128cb43d1753f373989a03cb986f5fb1552a Mon Sep 17 00:00:00 2001 From: David Garcia Date: Tue, 7 May 2024 14:26:54 +0100 Subject: [PATCH 235/551] docs: update theme --- .github/workflows/docs-pages.yaml | 8 +++++--- .github/workflows/docs-pr.yaml | 6 +++--- docs/pyproject.toml | 2 +- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/.github/workflows/docs-pages.yaml b/.github/workflows/docs-pages.yaml index 454c013441..ada7013134 100644 --- a/.github/workflows/docs-pages.yaml +++ b/.github/workflows/docs-pages.yaml @@ -6,6 +6,7 @@ on: push: branches: - master + - 'branch-**' paths: - "docs/**" workflow_dispatch: @@ -15,14 +16,15 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: + ref: ${{ github.event.repository.default_branch }} persist-credentials: false fetch-depth: 0 - name: Set up Python - uses: actions/setup-python@v3 + uses: actions/setup-python@v5 with: - python-version: 3.9 + python-version: '3.10' - name: Set up env run: make -C docs setupenv - name: Build driver diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml index 1935567dea..fed2d166fa 100644 --- a/.github/workflows/docs-pr.yaml +++ b/.github/workflows/docs-pr.yaml @@ -14,14 +14,14 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: persist-credentials: false fetch-depth: 0 - name: Set up Python - uses: actions/setup-python@v3 + uses: actions/setup-python@v5 with: - python-version: 3.9 + python-version: '3.10' - name: Set up env run: make -C docs setupenv - name: Build driver diff --git a/docs/pyproject.toml b/docs/pyproject.toml index d9c8bf8f04..6513716249 100644 --- a/docs/pyproject.toml +++ b/docs/pyproject.toml @@ -17,7 +17,7 @@ recommonmark = "0.7.1" redirects_cli ="~0.1.2" sphinx-autobuild = "2021.3.14" sphinx-sitemap = "2.5.1" -sphinx-scylladb-theme = "~1.6.1" +sphinx-scylladb-theme = "~1.7.2" sphinx-multiversion-scylla = "~0.3.1" Sphinx = "7.2.6" scales = "^1.0.9" From 82b4863a58a59a28d27848c6eec22f503be665e0 Mon Sep 17 00:00:00 2001 From: Kefu Chai Date: Mon, 20 May 2024 14:38:30 +0800 Subject: [PATCH 236/551] cassandra/cluster.py: use raw string when appropriate Python complains at seeing ```py re.compile('^\s*BEGIN\s+[a-zA-Z]*\s*BATCH', re.UNICODE) ``` ``` <>:1: SyntaxWarning: invalid escape sequence '\s' ``` but the interpreter continues on, and takes "\s" as it is without escaping it. still, it's not a valid string literal. because "\s" is not an escape sequence, while "\\s" is, but we don't have to escape "\" here, we can just use the raw string. simpler this way. in this change, we trade the invalid escape sequence for a raw string to silence this warning.
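as a quick standalone illustration of the equivalence (not part of the patch itself):
```py
import re

p1 = re.compile('\\s+')  # backslash escaped explicitly
p2 = re.compile(r'\s+')  # raw string: same pattern, no SyntaxWarning
assert p1.pattern == p2.pattern
```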
Signed-off-by: Kefu Chai --- cassandra/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 8ed0647ba9..5f2669c0bc 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -5451,7 +5451,7 @@ def cancel_continuous_paging(self): except AttributeError: raise DriverException("Attempted to cancel paging with no active session. This is only for requests with ContinuousdPagingOptions.") - batch_regex = re.compile('^\s*BEGIN\s+[a-zA-Z]*\s*BATCH') + batch_regex = re.compile(r'^\s*BEGIN\s+[a-zA-Z]*\s*BATCH') @property def was_applied(self): From 8c562f48311a7298cd19106c55536d48454adf34 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 9 May 2024 16:30:22 +0300 Subject: [PATCH 237/551] CI: use `--break-system-packages` when using pip globally seems like recent versions of pip on some OSes are preventing the user from installing things globally. we should override it, since we know what we are doing (most of the time). anyhow that code is run only in CI, and never locally Ref: https://veronneau.org/python-311-pip-and-breaking-system-packages.html --- .github/workflows/build-push.yml | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index 74f0415822..fc5ef558ed 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -46,11 +46,16 @@ jobs: platform: PyPy steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 name: Install Python + - name: Enable pip installing globally + if: runner.os == 'MacOs' || runner.os == 'Windows' + run: | + echo "PIP_BREAK_SYSTEM_PACKAGES=1" >> $GITHUB_ENV + - name: Install cibuildwheel run: | python3 -m pip install cibuildwheel==2.16.2 @@ -124,9 +129,9 @@ jobs: if: "(!contains(github.event.pull_request.labels.*.name, 'disable-test-build'))|| github.event_name == 'push' && endsWith(github.event.ref, 'scylla')" runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 name: Install Python - name: Build sdist run: | pip install build python -m build --sdist - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v4 with: path: dist/*.tar.gz @@ -146,7 +151,7 @@ jobs: # alternatively, to publish when a GitHub Release is created, use the following rule: # if: github.event_name == 'release' && github.event.action == 'published' steps: - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v4 with: name: artifact path: dist From dc05ae7066e4c8eec91dabc5f6fd783f3a091684 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Sun, 11 Feb 2024 20:21:08 +0200 Subject: [PATCH 238/551] tests/integration: set `skip_wait_for_gossip_to_settle=0` to speed up the boot sequence of scylla nodes we are using `skip_wait_for_gossip_to_settle=0`, the same as we have been using for quite a while in dtest on almost all tests also introduced `wait_other_notice=True` for places where we start the cluster, because without it we can get into a situation where we start a test and the cluster isn't fully ready and up. this change shaves 1h off the integration tests run, and it now finishes in 28min.
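in short, the pattern applied throughout the test helpers below boils down to (a sketch using the same ccmlib calls as in the diff):
```py
# boot faster: don't wait for gossip to settle on startup ...
CCM_CLUSTER.set_configuration_options({'skip_wait_for_gossip_to_settle': 0})
# ... but don't hand the cluster to a test until every node sees the others up
CCM_CLUSTER.start(wait_for_binary_proto=True, wait_other_notice=True)
```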
--- tests/integration/__init__.py | 5 +++-- tests/integration/long/test_policies.py | 2 +- .../standard/test_authentication_misconfiguration.py | 2 +- tests/integration/standard/test_custom_cluster.py | 6 +----- tests/integration/standard/test_scylla_cloud.py | 2 +- 5 files changed, 7 insertions(+), 10 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index f16d32bdf1..7826f4bcf9 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -499,7 +499,7 @@ def is_current_cluster(cluster_name, node_counts, workloads): def start_cluster_wait_for_up(cluster): - cluster.start(wait_for_binary_proto=True) + cluster.start(wait_for_binary_proto=True, wait_other_notice=True) # Added to wait for slow nodes to start up log.debug("Cluster started waiting for binary ports") for node in CCM_CLUSTER.nodes.values(): @@ -623,6 +623,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, else: CCM_CLUSTER.set_configuration_options({'experimental_features': ['lwt', 'udf'], 'start_native_transport': True}) + CCM_CLUSTER.set_configuration_options({'skip_wait_for_gossip_to_settle': 0}) # Permit IS NOT NULL restriction on non-primary key columns of a materialized view # This allows `test_metadata_with_quoted_identifiers` to run CCM_CLUSTER.set_configuration_options({'strict_is_not_null_in_views': False}) @@ -659,7 +660,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, node.set_workloads(workloads) if start: log.debug("Starting CCM cluster: {0}".format(cluster_name)) - CCM_CLUSTER.start(jvm_args=jvm_args, wait_for_binary_proto=True) + CCM_CLUSTER.start(jvm_args=jvm_args, wait_for_binary_proto=True, wait_other_notice=True) # Added to wait for slow nodes to start up log.debug("Cluster started waiting for binary ports") for node in CCM_CLUSTER.nodes.values(): diff --git a/tests/integration/long/test_policies.py b/tests/integration/long/test_policies.py index 680d0d7980..33f35ced0d 100644 --- a/tests/integration/long/test_policies.py +++ b/tests/integration/long/test_policies.py @@ -29,7 +29,7 @@ class RetryPolicyTests(unittest.TestCase): @classmethod def tearDownClass(cls): cluster = get_cluster() - cluster.start(wait_for_binary_proto=True) # make sure other nodes are restarted + cluster.start(wait_for_binary_proto=True, wait_other_notice=True) # make sure other nodes are restarted def test_should_rethrow_on_unvailable_with_default_policy_if_cas(self): """ diff --git a/tests/integration/standard/test_authentication_misconfiguration.py b/tests/integration/standard/test_authentication_misconfiguration.py index f5a9cebcdf..2b02664c3f 100644 --- a/tests/integration/standard/test_authentication_misconfiguration.py +++ b/tests/integration/standard/test_authentication_misconfiguration.py @@ -31,7 +31,7 @@ def setUpClass(cls): 'authenticator': 'PasswordAuthenticator', 'authorizer': 'CassandraAuthorizer', }) - ccm_cluster.start(wait_for_binary_proto=True) + ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) cls.ccm_cluster = ccm_cluster diff --git a/tests/integration/standard/test_custom_cluster.py b/tests/integration/standard/test_custom_cluster.py index 6cdfb8d1c3..20235f0057 100644 --- a/tests/integration/standard/test_custom_cluster.py +++ b/tests/integration/standard/test_custom_cluster.py @@ -26,11 +26,7 @@ def setup_module(): config_options = {'native_transport_port': 9046} ccm_cluster.set_configuration_options(config_options) # can't use wait_for_binary_proto cause ccm tries on 
port 9042 - ccm_cluster.start(wait_for_binary_proto=False) - # wait until all nodes are up - wait_until_not_raised(lambda: TestCluster(contact_points=['127.0.0.1'], port=9046).connect().shutdown(), 1, 20) - wait_until_not_raised(lambda: TestCluster(contact_points=['127.0.0.2'], port=9046).connect().shutdown(), 1, 20) - wait_until_not_raised(lambda: TestCluster(contact_points=['127.0.0.3'], port=9046).connect().shutdown(), 1, 120) + ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) def teardown_module(): diff --git a/tests/integration/standard/test_scylla_cloud.py b/tests/integration/standard/test_scylla_cloud.py index 4515358085..d1a22f8826 100644 --- a/tests/integration/standard/test_scylla_cloud.py +++ b/tests/integration/standard/test_scylla_cloud.py @@ -41,7 +41,7 @@ def start_cluster_with_proxy(self): ccm_cluster._update_config() - ccm_cluster.start(wait_for_binary_proto=True) + ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True) nodes_info = get_cluster_info(ccm_cluster, port=ssl_port) refresh_certs(ccm_cluster, nodes_info) From cdd125adbc7b0af1a9e5a1deaa5fc3d03a2b03f4 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Wed, 28 Feb 2024 15:12:14 +0200 Subject: [PATCH 239/551] ci: enable pytest run debug --- ci/run_integration_test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/run_integration_test.sh b/ci/run_integration_test.sh index 2796a33e61..f7f1f8769e 100755 --- a/ci/run_integration_test.sh +++ b/ci/run_integration_test.sh @@ -37,5 +37,5 @@ ccm remove # run test export MAPPED_SCYLLA_VERSION=3.11.4 -PROTOCOL_VERSION=4 pytest -rf --import-mode append $* +PROTOCOL_VERSION=4 pytest -vv -s --log-cli-level=debug -rf --import-mode append $* From dedf571f6c52acc92e7d06d92b9db7b399753f8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Tue, 4 Jun 2024 18:23:16 +0200 Subject: [PATCH 240/551] AsyncioConnection: fix initialize_reactor when called in event loop Previously, if executed within an existing asyncio loop, the driver would take the loop, assume it's not used, and start it in a separate thread. Additionally, if executed outside of a loop, the driver would create a new one and make it the default for the calling thread. Those behaviors are wrong so they are changed. Now the driver creates its own loop and executes it in a thread. Code that handled pid changes, which can happen when the class is transferred using e.g. multiprocessing, is fixed too - previously it didn't create a new thread after such a transition. --- cassandra/io/asyncioreactor.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/cassandra/io/asyncioreactor.py b/cassandra/io/asyncioreactor.py index 4cf3f16d40..41b744602d 100644 --- a/cassandra/io/asyncioreactor.py +++ b/cassandra/io/asyncioreactor.py @@ -113,15 +113,17 @@ def initialize_reactor(cls): with cls._lock: if cls._pid != os.getpid(): + # This means that class was passed to another process, + # e.g. using multiprocessing. + # In such case the class instance will be different and passing + # tasks to loop thread won't work.
+ # To fix we need to re-initialize the class cls._loop = None + cls._loop_thread = None + cls._pid = os.getpid() if cls._loop is None: - try: - cls._loop = asyncio.get_running_loop() - except RuntimeError: - cls._loop = asyncio.new_event_loop() - asyncio.set_event_loop(cls._loop) - - if not cls._loop_thread: + assert cls._loop_thread is None + cls._loop = asyncio.new_event_loop() # daemonize so the loop will be shut down on interpreter # shutdown cls._loop_thread = Thread(target=cls._loop.run_forever, From 2932139deaf660d25027c919d366e0d797ecefe4 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Mon, 3 Jun 2024 19:03:37 +0300 Subject: [PATCH 241/551] CI: make aarch64 a first class citizen remove the experimental actions, and make them part of the rest of the building sequence so it won't be possible to release without them working. --- .github/workflows/build-experimental.yml | 62 ------------------------ .github/workflows/build-push.yml | 50 +++++++++++++++++-- 2 files changed, 45 insertions(+), 67 deletions(-) delete mode 100644 .github/workflows/build-experimental.yml diff --git a/.github/workflows/build-experimental.yml b/.github/workflows/build-experimental.yml deleted file mode 100644 index bfc6bd0949..0000000000 --- a/.github/workflows/build-experimental.yml +++ /dev/null @@ -1,62 +0,0 @@ -name: experimental -on: [push, pull_request] - -env: - CIBW_BEFORE_BUILD_LINUX: "rm -rf ~/.pyxbld && yum install -y libev libev-devel openssl openssl-devel" - CIBW_ENVIRONMENT: "CASS_DRIVER_BUILD_CONCURRENCY=2 CFLAGS='-g0 -O3'" - CIBW_BUILD: "cp39* cp310* cp311* cp312*" - CIBW_SKIP: "*musllinux*" -jobs: - build_wheels: - if: contains(github.event.pull_request.labels.*.name, 'test-build-experimental') || github.event_name == 'push' && endsWith(github.event.ref, 'scylla') - # The host should always be linux - runs-on: ubuntu-latest - name: Build experimental ${{ matrix.archs }} wheels - strategy: - fail-fast: false - matrix: - archs: [ aarch64, ppc64le ] - - steps: - - uses: actions/checkout@v3 - - - name: Set up QEMU - id: qemu - uses: docker/setup-qemu-action@v1 - with: - platforms: all - if: runner.os == 'Linux' - - - uses: actions/setup-python@v4 - name: Install Python - - - name: Install cibuildwheel - run: | - python -m pip install cibuildwheel==2.16.2 - - - name: Build wheels - run: | - python -m cibuildwheel --archs ${{ matrix.archs }} --output-dir wheelhouse - - - uses: actions/upload-artifact@v2 - with: - path: ./wheelhouse/*.whl - - upload_pypi: - needs: [build_wheels] - runs-on: ubuntu-latest - # upload to PyPI on every tag starting with 'v' - if: github.event_name == 'push' && endsWith(github.event.ref, 'scylla') - # alternatively, to publish when a GitHub Release is created, use the following rule: - # if: github.event_name == 'release' && github.event.action == 'published' - steps: - - uses: actions/download-artifact@v2 - with: - name: artifact - path: dist - - - uses: pypa/gh-action-pypi-publish@master - with: - user: __token__ - password: ${{ secrets.PYPI_API_TOKEN }} - diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index fc5ef558ed..9d33f6d166 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -120,8 +120,9 @@ jobs: run: | python3 -m cibuildwheel --output-dir wheelhouse - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v4 with: + name: wheels-${{ matrix.os }}-${{ matrix.platform }} path: ./wheelhouse/*.whl build_sdist: @@ -141,10 +142,49 @@ jobs: - uses: actions/upload-artifact@v4 with: +
name: source-dist path: dist/*.tar.gz + build_wheels_extra_arch: + if: "(!contains(github.event.pull_request.labels.*.name, 'disable-test-build'))|| github.event_name == 'push' && endsWith(github.event.ref, 'scylla')" + # The host should always be linux + runs-on: ubuntu-latest + name: Build extra arch ${{ matrix.archs }} wheels + strategy: + fail-fast: false + matrix: + archs: [ aarch64,] # ppc64le ] + + steps: + - uses: actions/checkout@v4 + + - name: Set up QEMU + id: qemu + uses: docker/setup-qemu-action@v3 + with: + platforms: all + if: runner.os == 'Linux' + + - uses: actions/setup-python@v5 + name: Install Python + + - name: Install cibuildwheel + run: | + python -m pip install cibuildwheel==2.16.2 + + - name: Build wheels + env: + CIBW_BUILD: "cp39* cp310* cp311* cp312*" # limit to specific version since it take much more time than jobs limit + run: | + python -m cibuildwheel --archs ${{ matrix.archs }} --output-dir wheelhouse + + - uses: actions/upload-artifact@v4 + with: + name: wheels-${{ matrix.archs }} + path: ./wheelhouse/*.whl + upload_pypi: - needs: [build_wheels, build_sdist] + needs: [build_wheels, build_wheels_extra_arch, build_sdist] runs-on: ubuntu-latest # upload to PyPI on every tag starting with 'v' if: github.event_name == 'push' && endsWith(github.event.ref, 'scylla') @@ -153,10 +193,10 @@ jobs: steps: - uses: actions/download-artifact@v4 with: - name: artifact path: dist + merge-multiple: true - - uses: pypa/gh-action-pypi-publish@master + - uses: pypa/gh-action-pypi-publish@release/v1 with: - user: __token__ + skip-existing: true password: ${{ secrets.PYPI_API_TOKEN }} From f15e50226eb1261091f6f5fd976709cfbe8727af Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 6 Jun 2024 01:18:12 +0300 Subject: [PATCH 242/551] CI: specify specific version of openssl in choco command for some reason we are trying to download an openssl version which doesn't exist anymore on the mirror (3.3.0) and still something points to it as the latest version, while there is a new version (3.3.1). trying to hardcode the version into something that works --- .github/workflows/build-push.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index 9d33f6d166..a31acbed6f 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -63,7 +63,7 @@ jobs: - name: Install OpenSSL for Windows if: runner.os == 'Windows' run: | - choco install openssl -f -y + choco install openssl --version=3.3.1 -f -y - name: Install OpenSSL for MacOS if: runner.os == 'MacOs' From c51d4cc3f88a690d8037aea15aa2858c618d0895 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Wed, 5 Jun 2024 00:09:26 +0300 Subject: [PATCH 243/551] CI: enable Trusted publishing enable a bit more secure way to publish to pypi without the need for a token key Ref: https://docs.pypi.org/trusted-publishers/ --- .github/workflows/build-push.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index a31acbed6f..3169cec6af 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -186,6 +186,9 @@ jobs: upload_pypi: needs: [build_wheels, build_wheels_extra_arch, build_sdist] runs-on: ubuntu-latest + permissions: + id-token: write + # upload to PyPI on every tag starting with 'v' if: github.event_name == 'push' && endsWith(github.event.ref, 'scylla') # alternatively, to publish when a GitHub Release is created, use
the following rule: @@ -199,4 +202,3 @@ jobs: - uses: pypa/gh-action-pypi-publish@release/v1 with: skip-existing: true - password: ${{ secrets.PYPI_API_TOKEN }} From f15e50226eb1261091f6f5fd976709cfbe8727af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Tue, 18 Jun 2024 18:14:07 +0200 Subject: [PATCH 244/551] Release 3.26.9 --- cassandra/__init__.py | 2 +- docs/conf.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cassandra/__init__.py b/cassandra/__init__.py index b9ea95ddc3..97b79d22bc 100644 --- a/cassandra/__init__.py +++ b/cassandra/__init__.py @@ -23,7 +23,7 @@ def emit(self, record): logging.getLogger('cassandra').addHandler(NullHandler()) -__version_info__ = (3, 26, 8) +__version_info__ = (3, 26, 9) __version__ = '.'.join(map(str, __version_info__)) diff --git a/docs/conf.py b/docs/conf.py index 466bf9e84a..2d576988ff 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -10,14 +10,14 @@ # -- Global variables # Build documentation for the following tags and branches -TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.8-scylla'] +TAGS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla', '3.26.9-scylla'] BRANCHES = ['master'] # Set the latest version. -LATEST_VERSION = '3.26.8-scylla' +LATEST_VERSION = '3.26.9-scylla' # Set which versions are not released yet. UNSTABLE_VERSIONS = ['master'] # Set which versions are deprecated -DEPRECATED_VERSIONS = [''] +DEPRECATED_VERSIONS = ['3.21.0-scylla', '3.22.3-scylla', '3.24.8-scylla', '3.25.4-scylla', '3.25.11-scylla'] # -- General configuration From 811199a794a2c6209aac4263c03980ca53ade5ac Mon Sep 17 00:00:00 2001 From: Kefu Chai Date: Sun, 23 Jun 2024 15:19:29 +0800 Subject: [PATCH 245/551] cassandra/query: use timezone specific API to avoid deprecated warning before this change, when testing with cqlsh using some dtest based tests, we have failures like: ``` ------------------------------------------------------------------------------------------------- Captured log call -------------------------------------------------------------------------------------------------- 15:10:02,963 ccm DEBUG cluster.py :754 | node1: (EE) /home/kefu/dev/scylladb/tools/cqlsh/bin/cqlsh.py:1063: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future vers ion. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 15:10:02,963 cqlsh_tests.cqlsh_tests ERROR cqlsh_tests.py :534 | /home/kefu/dev/scylladb/tools/cqlsh/bin/cqlsh.py:1063: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
----------------------------------------------------------------------------------------------- Captured log teardown ------------------------------------------------------------------------------------------------ 15:10:05,989 dtest_setup DEBUG dtest_setup.py :629 | exclude_errors: [] 15:10:05,993 dtest_setup DEBUG dtest_setup.py :718 | removing ccm cluster test at: /home/kefu/.dtest/dtest-kguqevx3 15:10:06,002 dtest_setup DEBUG dtest_setup.py :721 | clearing ssl stores from [/home/kefu/.dtest/dtest-kguqevx3] directory 15:10:06,002 dtest_setup DEBUG dtest_setup.py :85 | Freeing cluster ID 20: link /home/kefu/.dtest/20 ================================================================================================== warnings summary ================================================================================================== :488 :488: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtim estamp(timestamp, datetime.UTC). cqlsh_tests/cqlsh_tests.py::TestCqlshWithSSL::test_tracing[require_client_auth=true] cqlsh_tests/cqlsh_tests.py::TestCqlshWithSSL::test_tracing[require_client_auth=false] /home/kefu/.local/lib/python3.12/site-packages/pytest_elk_reporter.py:281: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC). timestamp=datetime.datetime.utcnow().isoformat(), -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html ============================================================================================== short test summary info =============================================================================================== FAILED cqlsh_tests/cqlsh_tests.py::TestCqlshWithSSL::test_tracing[require_client_auth=true] - AssertionError: Failed to execute cqlsh FAILED cqlsh_tests/cqlsh_tests.py::TestCqlshWithSSL::test_tracing[require_client_auth=false] - AssertionError: Failed to execute cqlsh ```` this happens because the warnings are printed to stderr, and we take non-empty output in stderr as an indication of test failure. in this change, we replace the deprecated API with timezone-aware API, to avoid this warning. and the tests passed. 
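for reference, a minimal sketch of the replacement this change makes (an illustration added here, not part of the patch itself; the timestamp value is arbitrary):

```
from datetime import datetime, timezone

ts = 1718000000.0  # arbitrary example timestamp, seconds since the epoch

# deprecated since Python 3.12 -- returns a naive datetime and warns:
#   naive = datetime.utcfromtimestamp(ts)

# timezone-aware replacement used by this patch:
aware = datetime.fromtimestamp(ts, tz=timezone.utc)
print(aware.isoformat())  # 2024-06-10T06:13:20+00:00
```

both calls produce the same wall-clock fields; the difference is that the replacement attaches tzinfo=timezone.utc instead of returning a naive datetime, which is what silences the warning.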
Signed-off-by: Kefu Chai --- cassandra/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cassandra/query.py b/cassandra/query.py index e0d6f87fd6..a15aadb629 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -19,7 +19,7 @@ """ from collections import namedtuple -from datetime import datetime, timedelta +from datetime import datetime, timedelta, timezone import re import struct import time @@ -1086,7 +1086,7 @@ class TraceEvent(object): def __init__(self, description, timeuuid, source, source_elapsed, thread_name): self.description = description - self.datetime = datetime.utcfromtimestamp(unix_time_from_uuid1(timeuuid)) + self.datetime = datetime.fromtimestamp(unix_time_from_uuid1(timeuuid), tz=timezone.utc) self.source = source if source_elapsed is not None: self.source_elapsed = timedelta(microseconds=source_elapsed) From e590b7a8ad39dae61245115ff633c156a13124f6 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 20 Jun 2024 10:46:10 +0300 Subject: [PATCH 246/551] CI: add pre builds for python 3.13 adding new build so we can try out the new version of python, now it's in alpha/beta --- .github/workflows/build-pre-release.yml | 46 +++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 .github/workflows/build-pre-release.yml diff --git a/.github/workflows/build-pre-release.yml b/.github/workflows/build-pre-release.yml new file mode 100644 index 0000000000..659bf6c2af --- /dev/null +++ b/.github/workflows/build-pre-release.yml @@ -0,0 +1,46 @@ +name: Build pre release python versions + +on: [push, pull_request] + +env: + CIBW_TEST_COMMAND_LINUX: "pytest --import-mode append {project}/tests/unit -k 'not (test_connection_initialization or test_cloud)' && EVENT_LOOP_MANAGER=gevent pytest --import-mode append {project}/tests/unit/io/test_geventreactor.py" + CIBW_BEFORE_TEST: "pip install -r {project}/test-requirements.txt" + CIBW_BEFORE_BUILD_LINUX: "rm -rf ~/.pyxbld && yum install -y libffi-devel libev libev-devel openssl openssl-devel" + CIBW_ENVIRONMENT: "CASS_DRIVER_BUILD_CONCURRENCY=2 CFLAGS='-g0 -O3'" + CIBW_PRERELEASE_PYTHONS: True + CIBW_SKIP: cp35* cp36* *musllinux* + +jobs: + build_wheels: + name: Build wheels ${{ matrix.os }} (${{ matrix.platform }}) + if: "(!contains(github.event.pull_request.labels.*.name, 'disable-test-build')) || github.event_name == 'push' && endsWith(github.event.ref, 'scylla')" + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - os: ubuntu-latest + platform: x86_64 + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v5 + name: Install Python + + - name: Install cibuildwheel + run: | + python3 -m pip install cibuildwheel==2.19.1 + + - name: Overwrite for Linux 64 + if: runner.os == 'Linux' && matrix.platform == 'x86_64' + run: | + echo "CIBW_BUILD=cp313*_x86_64" >> $GITHUB_ENV + + - name: Build wheels + run: | + python3 -m cibuildwheel --output-dir wheelhouse + + - uses: actions/upload-artifact@v4 + with: + name: wheels-${{ matrix.os }}-${{ matrix.platform }} + path: ./wheelhouse/*.whl \ No newline at end of file From c9b24b74841f5aecc5dac6f94454950a0ebf76ac Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 20 Jun 2024 11:42:43 +0300 Subject: [PATCH 247/551] CI: remove gevent/greenlet from build-pre-release since python3.13 doesn't seem to be able to build those yet. 
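as a side note (an illustrative sketch, not part of the original message): the requirement lines in the diff below rely on PEP 508 environment markers such as python_version < '3.13', which pip evaluates per interpreter. the packaging library, already listed in test-requirements.txt, can evaluate the same markers:

```
from packaging.markers import Marker

marker = Marker("python_version < '3.13' and platform_machine != 'i686'")

# evaluates against the currently running interpreter by default
print(marker.evaluate())

# or against an explicit environment, the way a resolver would;
# here the first clause is false, so the whole marker is false
print(marker.evaluate({"python_version": "3.13", "platform_machine": "x86_64"}))
```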
we'll disable those tests, and make sure it's ignored in the requirements.txt --- .github/workflows/build-pre-release.yml | 2 +- test-requirements.txt | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-pre-release.yml b/.github/workflows/build-pre-release.yml index 659bf6c2af..251f816312 100644 --- a/.github/workflows/build-pre-release.yml +++ b/.github/workflows/build-pre-release.yml @@ -3,7 +3,7 @@ name: Build pre release python versions on: [push, pull_request] env: - CIBW_TEST_COMMAND_LINUX: "pytest --import-mode append {project}/tests/unit -k 'not (test_connection_initialization or test_cloud)' && EVENT_LOOP_MANAGER=gevent pytest --import-mode append {project}/tests/unit/io/test_geventreactor.py" + CIBW_TEST_COMMAND_LINUX: "pytest --import-mode append {project}/tests/unit -k 'not (test_connection_initialization or test_cloud)'" CIBW_BEFORE_TEST: "pip install -r {project}/test-requirements.txt" CIBW_BEFORE_BUILD_LINUX: "rm -rf ~/.pyxbld && yum install -y libffi-devel libev libev-devel openssl openssl-devel" CIBW_ENVIRONMENT: "CASS_DRIVER_BUILD_CONCURRENCY=2 CFLAGS='-g0 -O3'" diff --git a/test-requirements.txt b/test-requirements.txt index fa6afd6711..2851efc3db 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -7,9 +7,9 @@ sure pure-sasl twisted[tls]; python_version >= '3.5' twisted[tls]==19.2.1; python_version < '3.5' -gevent>=1.0; platform_machine != 'i686' and platform_machine != 'win32' -gevent==23.9.0; platform_machine == 'i686' or platform_machine == 'win32' -eventlet>=0.33.3 +gevent>=1.0; python_version < '3.13' and platform_machine != 'i686' and platform_machine != 'win32' +gevent==23.9.0; python_version < '3.13' and (platform_machine == 'i686' or platform_machine == 'win32') +eventlet>=0.33.3; python_version < '3.13' cython packaging futurist; python_version >= '3.7' From 2106af344f3d4e041874128cbc9fc346449faa84 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Mon, 1 Jul 2024 13:42:52 +0300 Subject: [PATCH 248/551] CI: build with manylinux_2_28 since centos7 is EOL and its mirrors are now broken, we are switching to a newer manylinux version Ref: https://github.com/pypa/cibuildwheel/issues/1772 Ref: https://github.com/pypa/manylinux/issues/1641 --- .github/workflows/build-pre-release.yml | 3 ++- .github/workflows/build-push.yml | 6 +++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-pre-release.yml b/.github/workflows/build-pre-release.yml index 251f816312..a9cc40dfaa 100644 --- a/.github/workflows/build-pre-release.yml +++ b/.github/workflows/build-pre-release.yml @@ -5,10 +5,11 @@ on: [push, pull_request] env: CIBW_TEST_COMMAND_LINUX: "pytest --import-mode append {project}/tests/unit -k 'not (test_connection_initialization or test_cloud)'" CIBW_BEFORE_TEST: "pip install -r {project}/test-requirements.txt" - CIBW_BEFORE_BUILD_LINUX: "rm -rf ~/.pyxbld && yum install -y libffi-devel libev libev-devel openssl openssl-devel" + CIBW_BEFORE_BUILD_LINUX: "rm -rf ~/.pyxbld && rpm --import https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux && yum install -y libffi-devel libev libev-devel openssl openssl-devel" CIBW_ENVIRONMENT: "CASS_DRIVER_BUILD_CONCURRENCY=2 CFLAGS='-g0 -O3'" CIBW_PRERELEASE_PYTHONS: True CIBW_SKIP: cp35* cp36* *musllinux* + CIBW_MANYLINUX_X86_64_IMAGE: manylinux_2_28 jobs: build_wheels: diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index 3169cec6af..1cd3d13f29 100644 --- a/.github/workflows/build-push.yml +++
b/.github/workflows/build-push.yml @@ -8,9 +8,13 @@ env: CIBW_TEST_COMMAND_MACOS: "pytest --import-mode append {project}/tests/unit -k 'not (test_multi_timer_validation or test_empty_connections or test_connection_initialization or test_timer_cancellation or test_cloud)' " CIBW_TEST_COMMAND_WINDOWS: "pytest --import-mode append {project}/tests/unit -k \"not (test_deserialize_date_range_year or test_datetype or test_libevreactor or test_connection_initialization or test_cloud)\" " CIBW_BEFORE_TEST: "pip install -r {project}/test-requirements.txt" - CIBW_BEFORE_BUILD_LINUX: "rm -rf ~/.pyxbld && yum install -y libffi-devel libev libev-devel openssl openssl-devel" + CIBW_BEFORE_BUILD_LINUX: "rm -rf ~/.pyxbld && rpm --import https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux && yum install -y libffi-devel libev libev-devel openssl openssl-devel" CIBW_ENVIRONMENT: "CASS_DRIVER_BUILD_CONCURRENCY=2 CFLAGS='-g0 -O3'" CIBW_SKIP: cp35* cp36* *musllinux* + CIBW_MANYLINUX_X86_64_IMAGE: manylinux_2_28 + CIBW_MANYLINUX_PYPY_X86_64_IMAGE: manylinux_2_28 + CIBW_MANYLINUX_AARCH64_IMAGE: manylinux_2_28 + CIBW_MANYLINUX_PYPY_AARCH64_IMAGE: manylinux_2_28 jobs: build_wheels: From af1cbb833d6b8fb0d5f5a4dbae80e26bd7079cf7 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Mon, 1 Jul 2024 16:18:09 +0300 Subject: [PATCH 249/551] CI: disable 32bit builds we don't have manylinux for those anymore, and probably very little usage --- .github/workflows/build-push.yml | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index 1cd3d13f29..aad522a449 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -10,7 +10,7 @@ env: CIBW_BEFORE_TEST: "pip install -r {project}/test-requirements.txt" CIBW_BEFORE_BUILD_LINUX: "rm -rf ~/.pyxbld && rpm --import https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux && yum install -y libffi-devel libev libev-devel openssl openssl-devel" CIBW_ENVIRONMENT: "CASS_DRIVER_BUILD_CONCURRENCY=2 CFLAGS='-g0 -O3'" - CIBW_SKIP: cp35* cp36* *musllinux* + CIBW_SKIP: cp35* cp36* pp*i686 *musllinux* CIBW_MANYLINUX_X86_64_IMAGE: manylinux_2_28 CIBW_MANYLINUX_PYPY_X86_64_IMAGE: manylinux_2_28 CIBW_MANYLINUX_AARCH64_IMAGE: manylinux_2_28 @@ -28,15 +28,9 @@ jobs: - os: ubuntu-latest platform: x86_64 - - os: ubuntu-latest - platform: i686 - - os: ubuntu-latest platform: PyPy - - os: windows-latest - platform: win32 - - os: windows-latest platform: win64 @@ -79,12 +73,6 @@ jobs: run: | echo "CIBW_BUILD=cp3*_x86_64" >> $GITHUB_ENV - - name: Overwrite for Linux 32 - if: runner.os == 'Linux' && matrix.platform == 'i686' - run: | - echo "CIBW_BUILD=cp*_i686" >> $GITHUB_ENV - echo "CIBW_TEST_COMMAND_LINUX=" >> $GITHUB_ENV - - name: Overwrite for Linux PyPy if: runner.os == 'Linux' && matrix.platform == 'PyPy' run: | @@ -96,11 +84,6 @@ jobs: run: | echo "CIBW_BUILD=cp*win_amd64" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append - - name: Overwrite for Windows 32 - if: runner.os == 'Windows' && matrix.platform == 'win32' - run: | - echo "CIBW_BUILD=cp*win32" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append - - name: Overwrite for Windows PyPY if: runner.os == 'Windows' && matrix.platform == 'PyPy' run: | From 11d3499351373c27bc39cff85b4cea393b67b7a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Tue, 2 Jul 2024 20:50:27 +0200 Subject: [PATCH 250/551] test_metadata: Don't assume extensions are empty by default This is not 
true in new versions of Scylla --- tests/integration/standard/test_metadata.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index c561491ab4..39018ef5d8 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -972,9 +972,6 @@ def test_table_extensions(self): table_meta = ks_meta.tables[t] view_meta = table_meta.views[v] - self.assertFalse(table_meta.extensions) - self.assertFalse(view_meta.extensions) - original_table_cql = table_meta.export_as_string() original_view_cql = view_meta.export_as_string() @@ -990,8 +987,6 @@ def after_table_cql(cls, table_meta, ext_key, ext_blob): class Ext1(Ext0): name = t + '##' - self.assertFalse(table_meta.extensions) - self.assertFalse(view_meta.extensions) self.assertIn(Ext0.name, _RegisteredExtensionType._extension_registry) self.assertIn(Ext1.name, _RegisteredExtensionType._extension_registry) # There will be the RLAC extension here. From 8a4387ae3f36522cc10841c607abb8182c6f8286 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Sun, 9 Jun 2024 22:35:29 +0300 Subject: [PATCH 251/551] CI: download libev via conan, for windows builds to have it windows builds were so far running with libev available, and until this sync the fallback for python 3.12 was the asyncio event loop; now we fail instead of falling back to asyncio, so all unit tests on windows fail on any import from cassandra.connection. in this change we use conan to download libev, and use it to compile the driver with libev Ref: https://conan.io/center/recipes/libev --- .github/workflows/build-push.yml | 12 ++++++- MANIFEST.in | 1 + cassandra/io/libevwrapper.c | 2 ++ conanfile.py | 57 ++++++++++++++++++++++++++++++++ setup.py | 18 +++++++++- 5 files changed, 88 insertions(+), 2 deletions(-) create mode 100644 conanfile.py diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index aad522a449..53be975be1 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -10,7 +10,7 @@ env: CIBW_BEFORE_TEST: "pip install -r {project}/test-requirements.txt" CIBW_BEFORE_BUILD_LINUX: "rm -rf ~/.pyxbld && rpm --import https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux && yum install -y libffi-devel libev libev-devel openssl openssl-devel" CIBW_ENVIRONMENT: "CASS_DRIVER_BUILD_CONCURRENCY=2 CFLAGS='-g0 -O3'" - CIBW_SKIP: cp35* cp36* pp*i686 *musllinux* + CIBW_SKIP: cp35* cp36* cp37* pp*i686 *musllinux* CIBW_MANYLINUX_X86_64_IMAGE: manylinux_2_28 CIBW_MANYLINUX_PYPY_X86_64_IMAGE: manylinux_2_28 CIBW_MANYLINUX_AARCH64_IMAGE: manylinux_2_28 CIBW_MANYLINUX_PYPY_AARCH64_IMAGE: manylinux_2_28 @@ -63,6 +63,16 @@ jobs: run: | choco install openssl --version=3.3.1 -f -y + - name: Install Conan + if: runner.os == 'Windows' + uses: turtlebrowser/get-conan@main + + - name: configure libev for Windows + if: runner.os == 'Windows' + run: | + conan profile detect + conan install conanfile.py + - name: Install OpenSSL for MacOS if: runner.os == 'MacOs' run: | diff --git a/MANIFEST.in b/MANIFEST.in index 660db719b0..6bb26b0e5c 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -4,3 +4,4 @@ include cassandra/io/libevwrapper.c include cassandra/*.pyx include cassandra/*.pxd include cassandra/*.h +graft build-release \ No newline at end of file diff --git a/cassandra/io/libevwrapper.c b/cassandra/io/libevwrapper.c index 99e1df30f7..bbb902b757 100644 --- a/cassandra/io/libevwrapper.c +++ b/cassandra/io/libevwrapper.c @@ -1,3 +1,5 @@ +#pragma comment(lib,
"Ws2_32.Lib") + #include #include diff --git a/conanfile.py b/conanfile.py new file mode 100644 index 0000000000..bc2b27c1c6 --- /dev/null +++ b/conanfile.py @@ -0,0 +1,57 @@ +import json +from pathlib import Path + +from conan import ConanFile +from conan.tools.layout import basic_layout +from conan.internal import check_duplicated_generator +from conan.tools.files import save + + +CONAN_COMMANDLINE_FILENAME = "conandeps.env" + +class CommandlineDeps: + def __init__(self, conanfile): + """ + :param conanfile: ``< ConanFile object >`` The current recipe object. Always use ``self``. + """ + self._conanfile = conanfile + + def generate(self) -> None: + """ + Collects all dependencies and components, then, generating a Makefile + """ + check_duplicated_generator(self, self._conanfile) + + host_req = self._conanfile.dependencies.host + build_req = self._conanfile.dependencies.build # tool_requires + test_req = self._conanfile.dependencies.test + + content_buffer = "" + + # Filter the build_requires not activated for any requirement + dependencies = [tup for tup in list(host_req.items()) + list(build_req.items()) + list(test_req.items()) if not tup[0].build] + + for require, dep in dependencies: + # Require is not used at the moment, but its information could be used, and will be used in Conan 2.0 + if require.build: + continue + include_dir = Path(dep.package_folder) / 'include' + package_dir = Path(dep.package_folder) / 'lib' + content_buffer += json.dumps(dict(include_dirs=str(include_dir), library_dirs=str(package_dir))) + + save(self._conanfile, CONAN_COMMANDLINE_FILENAME, content_buffer) + self._conanfile.output.info(f"Generated {CONAN_COMMANDLINE_FILENAME}") + + +class python_driverConan(ConanFile): + win_bash = False + + settings = "os", "compiler", "build_type", "arch" + requires = "libev/4.33" + + def layout(self): + basic_layout(self) + + def generate(self): + pc = CommandlineDeps(self) + pc.generate() diff --git a/setup.py b/setup.py index 4a525221eb..791c8923da 100644 --- a/setup.py +++ b/setup.py @@ -15,7 +15,9 @@ from __future__ import print_function import os import sys +import json import warnings +from pathlib import Path if __name__ == '__main__' and sys.argv[1] == "gevent_nosetests": print("Running gevent tests") @@ -142,6 +144,20 @@ def __init__(self, ext): murmur3_ext = Extension('cassandra.cmurmur3', sources=['cassandra/cmurmur3.c']) +is_macos = sys.platform.startswith('darwin') + +libev_includes = ['/usr/include/libev', '/usr/local/include', '/opt/local/include', '/usr/include'] +libev_libdirs = ['/usr/local/lib', '/opt/local/lib', '/usr/lib64'] +if is_macos: + libev_includes.extend(['/opt/homebrew/include', os.path.expanduser('~/homebrew/include')]) + libev_libdirs.extend(['/opt/homebrew/lib']) + +conan_envfile = Path(__file__).parent / 'build-release/conan/conandeps.env' +if conan_envfile.exists(): + conan_paths = json.loads(conan_envfile.read_text()) + libev_includes.extend([conan_paths.get('include_dirs')]) + libev_libdirs.extend([conan_paths.get('library_dirs')]) + libev_ext = Extension('cassandra.io.libevwrapper', sources=['cassandra/io/libevwrapper.c'], include_dirs=['/usr/include/libev', '/usr/local/include', '/opt/local/include'], @@ -184,7 +200,7 @@ def __init__(self, ext): try_extensions = "--no-extensions" not in sys.argv and is_supported_platform and is_supported_arch and not os.environ.get('CASS_DRIVER_NO_EXTENSIONS') try_murmur3 = try_extensions and "--no-murmur3" not in sys.argv -try_libev = try_extensions and "--no-libev" not in sys.argv and not is_pypy 
and not is_windows +try_libev = try_extensions and "--no-libev" not in sys.argv and not is_pypy try_cython = try_extensions and "--no-cython" not in sys.argv and not is_pypy and not os.environ.get('CASS_DRIVER_NO_CYTHON') try_cython &= 'egg_info' not in sys.argv # bypass setup_requires for pip egg_info calls, which will never have --install-option"--no-cython" coming fomr pip From 2d1c78712fc95189e626b74b933609b4081c1461 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Wed, 19 Jul 2023 18:09:45 +0200 Subject: [PATCH 252/551] Drop 'six' from dependencies As we no longer support Python 2, there is no reason to keep this dependency. This commit removes all usages of six and removes it from dependencies. --- benchmarks/callback_full_pipeline.py | 1 - benchmarks/future_batches.py | 2 +- benchmarks/future_full_pipeline.py | 2 +- benchmarks/sync.py | 1 - cassandra/auth.py | 16 ++- cassandra/cluster.py | 32 +++--- cassandra/compat.py | 20 ---- cassandra/concurrent.py | 26 ++--- cassandra/connection.py | 14 +-- cassandra/cqlengine/__init__.py | 8 +- cassandra/cqlengine/columns.py | 7 +- cassandra/cqlengine/connection.py | 3 +- cassandra/cqlengine/management.py | 5 +- cassandra/cqlengine/models.py | 8 +- cassandra/cqlengine/operators.py | 4 +- cassandra/cqlengine/query.py | 25 +++-- cassandra/cqlengine/statements.py | 25 ++--- cassandra/cqlengine/usertype.py | 10 +- cassandra/cqltypes.py | 100 ++++++------------ cassandra/cython_marshal.pyx | 16 +-- cassandra/datastax/cloud/__init__.py | 11 +- cassandra/datastax/graph/fluent/_query.py | 3 +- .../datastax/graph/fluent/_serializers.py | 12 +-- cassandra/datastax/graph/graphson.py | 53 +++------- cassandra/datastax/graph/query.py | 12 +-- cassandra/datastax/insights/registry.py | 3 +- cassandra/datastax/insights/reporter.py | 7 +- cassandra/datastax/insights/serializers.py | 6 +- cassandra/deserializers.pyx | 4 - cassandra/encoder.py | 65 ++++-------- cassandra/io/asyncorereactor.py | 1 - cassandra/io/eventletreactor.py | 4 +- cassandra/io/geventreactor.py | 1 - cassandra/io/libevreactor.py | 1 - cassandra/marshal.py | 42 ++------ cassandra/metadata.py | 32 +++--- cassandra/murmur3.py | 1 - cassandra/protocol.py | 14 +-- cassandra/query.py | 8 +- cassandra/scylla/cloud.py | 14 +-- cassandra/segment.py | 4 - cassandra/util.py | 42 +++----- docs/installation.rst | 2 +- .../execute_async_with_queue.py | 2 +- requirements.txt | 1 - setup.py | 3 +- tests/integration/__init__.py | 3 +- tests/integration/advanced/__init__.py | 2 +- tests/integration/advanced/graph/__init__.py | 23 ++-- .../advanced/graph/fluent/__init__.py | 11 +- .../advanced/graph/fluent/test_graph.py | 6 +- .../integration/advanced/graph/test_graph.py | 1 - .../advanced/graph/test_graph_datatype.py | 13 ++- .../advanced/graph/test_graph_query.py | 7 +- .../integration/advanced/test_cont_paging.py | 1 - tests/integration/cloud/test_cloud.py | 8 +- .../columns/test_container_columns.py | 3 +- .../cqlengine/columns/test_value_io.py | 9 +- .../management/test_compaction_settings.py | 3 +- .../cqlengine/management/test_management.py | 1 - .../model/test_class_construction.py | 1 - .../operators/test_where_operators.py | 20 ++-- .../statements/test_base_statement.py | 3 +- .../statements/test_delete_statement.py | 21 ++-- .../statements/test_insert_statement.py | 8 +- .../statements/test_select_statement.py | 29 +++-- .../statements/test_update_statement.py | 13 ++- .../cqlengine/statements/test_where_clause.py | 3 +- .../integration/cqlengine/test_batch_query.py | 3 - 
.../cqlengine/test_lwt_conditional.py | 3 +- tests/integration/datatype_utils.py | 11 +- tests/integration/long/test_ipv6.py | 1 - .../integration/simulacron/test_connection.py | 1 - tests/integration/simulacron/utils.py | 2 +- .../standard/test_authentication.py | 1 - .../standard/test_client_warnings.py | 1 - tests/integration/standard/test_concurrent.py | 2 - tests/integration/standard/test_connection.py | 1 - .../standard/test_custom_payload.py | 8 +- .../standard/test_custom_protocol_handler.py | 3 +- tests/integration/standard/test_metadata.py | 17 ++- tests/integration/standard/test_query.py | 5 +- .../integration/standard/test_query_paging.py | 1 - .../standard/test_single_interface.py | 4 +- tests/integration/standard/test_types.py | 33 ++---- tests/integration/standard/test_udts.py | 7 +- tests/unit/advanced/cloud/test_cloud.py | 4 +- tests/unit/advanced/test_graph.py | 22 ++-- tests/unit/cqlengine/test_connection.py | 2 - tests/unit/io/utils.py | 15 ++- tests/unit/test_auth.py | 5 +- tests/unit/test_cluster.py | 3 +- tests/unit/test_concurrent.py | 2 +- tests/unit/test_connection.py | 7 +- tests/unit/test_control_connection.py | 4 +- tests/unit/test_metadata.py | 23 ++-- tests/unit/test_orderedmap.py | 7 +- tests/unit/test_parameter_binding.py | 7 +- tests/unit/test_policies.py | 15 ++- tests/unit/test_protocol.py | 1 - tests/unit/test_query.py | 4 +- tests/unit/test_response_future.py | 1 - tests/unit/test_segment.py | 34 +++--- tests/unit/test_timestamps.py | 6 +- tests/unit/test_types.py | 8 +- tox.ini | 1 - 106 files changed, 398 insertions(+), 739 deletions(-) delete mode 100644 cassandra/compat.py diff --git a/benchmarks/callback_full_pipeline.py b/benchmarks/callback_full_pipeline.py index e3ecfe3be5..a4a4c33315 100644 --- a/benchmarks/callback_full_pipeline.py +++ b/benchmarks/callback_full_pipeline.py @@ -18,7 +18,6 @@ from threading import Event from base import benchmark, BenchmarkThread -from six.moves import range log = logging.getLogger(__name__) diff --git a/benchmarks/future_batches.py b/benchmarks/future_batches.py index 8cd915ebab..de4484e617 100644 --- a/benchmarks/future_batches.py +++ b/benchmarks/future_batches.py @@ -14,7 +14,7 @@ import logging from base import benchmark, BenchmarkThread -from six.moves import queue +import queue log = logging.getLogger(__name__) diff --git a/benchmarks/future_full_pipeline.py b/benchmarks/future_full_pipeline.py index 9a9fcfcd50..901573c18e 100644 --- a/benchmarks/future_full_pipeline.py +++ b/benchmarks/future_full_pipeline.py @@ -14,7 +14,7 @@ import logging from base import benchmark, BenchmarkThread -from six.moves import queue +import queue log = logging.getLogger(__name__) diff --git a/benchmarks/sync.py b/benchmarks/sync.py index f2a45fcd7d..96e744f700 100644 --- a/benchmarks/sync.py +++ b/benchmarks/sync.py @@ -13,7 +13,6 @@ # limitations under the License. 
from base import benchmark, BenchmarkThread -from six.moves import range class Runner(BenchmarkThread): diff --git a/cassandra/auth.py b/cassandra/auth.py index dcee131f4d..f41ba9f73d 100644 --- a/cassandra/auth.py +++ b/cassandra/auth.py @@ -32,8 +32,6 @@ except ImportError: SASLClient = None -import six - log = logging.getLogger(__name__) # Custom payload keys related to DSE Unified Auth @@ -270,15 +268,15 @@ def __init__(self, username, password): self.password = password def get_mechanism(self): - return six.b("PLAIN") + return b"PLAIN" def get_initial_challenge(self): - return six.b("PLAIN-START") + return b"PLAIN-START" def evaluate_challenge(self, challenge): - if challenge == six.b('PLAIN-START'): + if challenge == b'PLAIN-START': data = "\x00%s\x00%s" % (self.username, self.password) - return data if six.PY2 else data.encode() + return data.encode() raise Exception('Did not receive a valid challenge response from server') @@ -297,13 +295,13 @@ def __init__(self, host, service, qops, properties): self.sasl = SASLClient(host, service, 'GSSAPI', qops=qops, **properties) def get_mechanism(self): - return six.b("GSSAPI") + return b"GSSAPI" def get_initial_challenge(self): - return six.b("GSSAPI-START") + return b"GSSAPI-START" def evaluate_challenge(self, challenge): - if challenge == six.b('GSSAPI-START'): + if challenge == b'GSSAPI-START': return self.sasl.process() else: return self.sasl.process(challenge) diff --git a/cassandra/cluster.py b/cassandra/cluster.py index 5f2669c0bc..71be215ab1 100644 --- a/cassandra/cluster.py +++ b/cassandra/cluster.py @@ -21,6 +21,7 @@ import atexit from binascii import hexlify from collections import defaultdict +from collections.abc import Mapping from concurrent.futures import ThreadPoolExecutor, FIRST_COMPLETED, wait as wait_futures from copy import copy from functools import partial, wraps @@ -30,8 +31,7 @@ from warnings import warn from random import random import re -import six -from six.moves import filter, range, queue as Queue +import queue import socket import sys import time @@ -82,7 +82,6 @@ from cassandra.marshal import int64_pack from cassandra.tablets import Tablet, Tablets from cassandra.timestamps import MonotonicTimestampGenerator -from cassandra.compat import Mapping from cassandra.util import _resolve_contact_points_to_string_map, Version from cassandra.datastax.insights.reporter import MonitorReporter @@ -113,9 +112,6 @@ except ImportError: from cassandra.util import WeakSet # NOQA -if six.PY3: - long = int - def _is_eventlet_monkey_patched(): if 'eventlet.patcher' not in sys.modules: return False @@ -1219,7 +1215,7 @@ def __init__(self, else: self._contact_points_explicit = True - if isinstance(contact_points, six.string_types): + if isinstance(contact_points, str): raise TypeError("contact_points should not be a string, it should be a sequence (e.g. 
list) of strings") if None in contact_points: @@ -1882,8 +1878,8 @@ def _new_session(self, keyspace): return session def _session_register_user_types(self, session): - for keyspace, type_map in six.iteritems(self._user_types): - for udt_name, klass in six.iteritems(type_map): + for keyspace, type_map in self._user_types.items(): + for udt_name, klass in type_map.items(): session.user_type_registered(keyspace, udt_name, klass) def _cleanup_failed_on_up_handling(self, host): @@ -2767,7 +2763,7 @@ def execute_async(self, query, parameters=None, trace=False, custom_payload=None """ custom_payload = custom_payload if custom_payload else {} if execute_as: - custom_payload[_proxy_execute_key] = six.b(execute_as) + custom_payload[_proxy_execute_key] = execute_as.encode() future = self._create_response_future( query, parameters, trace, custom_payload, timeout, @@ -2831,8 +2827,8 @@ def execute_graph_async(self, query, parameters=None, trace=False, execution_pro custom_payload = execution_profile.graph_options.get_options_map() if execute_as: - custom_payload[_proxy_execute_key] = six.b(execute_as) - custom_payload[_request_timeout_key] = int64_pack(long(execution_profile.request_timeout * 1000)) + custom_payload[_proxy_execute_key] = execute_as.encode() + custom_payload[_request_timeout_key] = int64_pack(int(execution_profile.request_timeout * 1000)) future = self._create_response_future(query, parameters=None, trace=trace, custom_payload=custom_payload, timeout=_NOT_SET, execution_profile=execution_profile) @@ -2969,7 +2965,7 @@ def _create_response_future(self, query, parameters, trace, custom_payload, prepared_statement = None - if isinstance(query, six.string_types): + if isinstance(query, str): query = SimpleStatement(query) elif isinstance(query, PreparedStatement): query = query.bind(parameters) @@ -3437,10 +3433,6 @@ def user_type_registered(self, keyspace, user_type, klass): 'User type %s does not exist in keyspace %s' % (user_type, keyspace)) field_names = type_meta.field_names - if six.PY2: - # go from unicode to string to avoid decode errors from implicit - # decode when formatting non-ascii values - field_names = [fn.encode('utf-8') for fn in field_names] def encode(val): return '{ %s }' % ' , '.join('%s : %s' % ( @@ -4208,7 +4200,7 @@ def _get_schema_mismatches(self, peers_result, local_result, local_address): log.debug("[control connection] Schemas match") return None - return dict((version, list(nodes)) for version, nodes in six.iteritems(versions)) + return dict((version, list(nodes)) for version, nodes in versions.items()) def _get_peers_query(self, peers_query_type, connection=None): """ @@ -4327,7 +4319,7 @@ class _Scheduler(Thread): is_shutdown = False def __init__(self, executor): - self._queue = Queue.PriorityQueue() + self._queue = queue.PriorityQueue() self._scheduled_tasks = set() self._count = count() self._executor = executor @@ -4385,7 +4377,7 @@ def run(self): else: self._queue.put_nowait((run_at, i, task)) break - except Queue.Empty: + except queue.Empty: pass time.sleep(0.1) diff --git a/cassandra/compat.py b/cassandra/compat.py deleted file mode 100644 index 83c1b104e5..0000000000 --- a/cassandra/compat.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright DataStax, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import six - -if six.PY2: - from collections import Mapping -elif six.PY3: - from collections.abc import Mapping diff --git a/cassandra/concurrent.py b/cassandra/concurrent.py index 0228f297fe..fb8f26e1cc 100644 --- a/cassandra/concurrent.py +++ b/cassandra/concurrent.py @@ -16,8 +16,6 @@ from collections import namedtuple from heapq import heappush, heappop from itertools import cycle -import six -from six.moves import xrange, zip from threading import Condition import sys @@ -119,7 +117,7 @@ def execute(self, concurrency, fail_fast): self._current = 0 self._exec_count = 0 with self._condition: - for n in xrange(concurrency): + for n in range(concurrency): if not self._execute_next(): break return self._results() @@ -143,17 +141,13 @@ def _execute(self, idx, statement, params): callback=self._on_success, callback_args=args, errback=self._on_error, errback_args=args) except Exception as exc: - # exc_info with fail_fast to preserve stack trace info when raising on the client thread - # (matches previous behavior -- not sure why we wouldn't want stack trace in the other case) - e = sys.exc_info() if self._fail_fast and six.PY2 else exc - # If we're not failing fast and all executions are raising, there is a chance of recursing # here as subsequent requests are attempted. If we hit this threshold, schedule this result/retry # and let the event loop thread return. 
if self._exec_depth < self.max_error_recursion: - self._put_result(e, idx, False) + self._put_result(exc, idx, False) else: - self.session.submit(self._put_result, e, idx, False) + self.session.submit(self._put_result, exc, idx, False) self._exec_depth -= 1 def _on_success(self, result, future, idx): @@ -163,14 +157,6 @@ def _on_success(self, result, future, idx): def _on_error(self, result, future, idx): self._put_result(result, idx, False) - @staticmethod - def _raise(exc): - if six.PY2 and isinstance(exc, tuple): - (exc_type, value, traceback) = exc - six.reraise(exc_type, value, traceback) - else: - raise exc - class ConcurrentExecutorGenResults(_ConcurrentExecutor): @@ -190,7 +176,7 @@ def _results(self): try: self._condition.release() if self._fail_fast and not res[0]: - self._raise(res[1]) + raise res[1] yield res finally: self._condition.acquire() @@ -221,9 +207,9 @@ def _results(self): while self._current < self._exec_count: self._condition.wait() if self._exception and self._fail_fast: - self._raise(self._exception) + raise self._exception if self._exception and self._fail_fast: # raise the exception even if there was no wait - self._raise(self._exception) + raise self._exception return [r[1] for r in sorted(self._results_queue)] diff --git a/cassandra/connection.py b/cassandra/connection.py index 754555a0d4..9fa2a991ec 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -19,8 +19,6 @@ from heapq import heappush, heappop import io import logging -import six -from six.moves import range import socket import struct import sys @@ -36,7 +34,7 @@ if 'gevent.monkey' in sys.modules: from gevent.queue import Queue, Empty else: - from six.moves.queue import Queue, Empty # noqa + from queue import Queue, Empty # noqa from cassandra import ConsistencyLevel, AuthenticationFailed, OperationTimedOut, ProtocolVersion from cassandra.marshal import int32_pack @@ -613,12 +611,6 @@ def wrapper(self, *args, **kwargs): DEFAULT_CQL_VERSION = '3.0.0' -if six.PY3: - def int_from_buf_item(i): - return i -else: - int_from_buf_item = ord - class _ConnectionIOBuffer(object): """ @@ -1164,7 +1156,7 @@ def _read_frame_header(self): buf = self._io_buffer.cql_frame_buffer.getvalue() pos = len(buf) if pos: - version = int_from_buf_item(buf[0]) & PROTOCOL_VERSION_MASK + version = buf[0] & PROTOCOL_VERSION_MASK if version not in ProtocolVersion.SUPPORTED_VERSIONS: raise ProtocolError("This version of the driver does not support protocol version %d" % version) frame_header = frame_header_v3 if version >= 3 else frame_header_v1_v2 @@ -1367,7 +1359,7 @@ def _handle_options_response(self, options_response): remote_supported_compressions) else: compression_type = None - if isinstance(self.compression, six.string_types): + if isinstance(self.compression, str): # the user picked a specific compression type ('snappy' or 'lz4') if self.compression not in remote_supported_compressions: raise ProtocolError( diff --git a/cassandra/cqlengine/__init__.py b/cassandra/cqlengine/__init__.py index e2a952d682..b9466e961b 100644 --- a/cassandra/cqlengine/__init__.py +++ b/cassandra/cqlengine/__init__.py @@ -12,9 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six - - # Caching constants. 
CACHING_ALL = "ALL" CACHING_KEYS_ONLY = "KEYS_ONLY" @@ -31,7 +28,4 @@ class ValidationError(CQLEngineException): class UnicodeMixin(object): - if six.PY3: - __str__ = lambda x: x.__unicode__() - else: - __str__ = lambda x: six.text_type(x).encode('utf-8') + __str__ = lambda x: x.__unicode__() diff --git a/cassandra/cqlengine/columns.py b/cassandra/cqlengine/columns.py index e0012858b4..4adb88476b 100644 --- a/cassandra/cqlengine/columns.py +++ b/cassandra/cqlengine/columns.py @@ -15,7 +15,6 @@ from copy import deepcopy, copy from datetime import date, datetime, timedelta import logging -import six from uuid import UUID as _UUID from cassandra import util @@ -327,7 +326,7 @@ class Blob(Column): def to_database(self, value): - if not isinstance(value, (six.binary_type, bytearray)): + if not isinstance(value, (bytes, bytearray)): raise Exception("expecting a binary, got a %s" % type(value)) val = super(Bytes, self).to_database(value) @@ -381,7 +380,7 @@ def __init__(self, min_length=None, max_length=None, **kwargs): def validate(self, value): value = super(Text, self).validate(value) - if not isinstance(value, (six.string_types, bytearray)) and value is not None: + if not isinstance(value, (str, bytearray)) and value is not None: raise ValidationError('{0} {1} is not a string'.format(self.column_name, type(value))) if self.max_length is not None: if value and len(value) > self.max_length: @@ -655,7 +654,7 @@ def validate(self, value): return if isinstance(val, _UUID): return val - if isinstance(val, six.string_types): + if isinstance(val, str): try: return _UUID(val) except ValueError: diff --git a/cassandra/cqlengine/connection.py b/cassandra/cqlengine/connection.py index d98020b8a8..516ff0e4ed 100644 --- a/cassandra/cqlengine/connection.py +++ b/cassandra/cqlengine/connection.py @@ -14,7 +14,6 @@ from collections import defaultdict import logging -import six import threading from cassandra.cluster import Cluster, _ConfigMode, _NOT_SET, NoHostAvailable, UserTypeDoesNotExist, ConsistencyLevel @@ -346,7 +345,7 @@ def execute(query, params=None, consistency_level=None, timeout=NOT_SET, connect elif isinstance(query, BaseCQLStatement): params = query.get_context() query = SimpleStatement(str(query), consistency_level=consistency_level, fetch_size=query.fetch_size) - elif isinstance(query, six.string_types): + elif isinstance(query, str): query = SimpleStatement(query, consistency_level=consistency_level) log.debug(format_log_context('Query: {}, Params: {}'.format(query.query_string, params), connection=connection)) diff --git a/cassandra/cqlengine/management.py b/cassandra/cqlengine/management.py index 5e49fb54e5..6c752fa5b0 100644 --- a/cassandra/cqlengine/management.py +++ b/cassandra/cqlengine/management.py @@ -16,7 +16,6 @@ import json import logging import os -import six import warnings from itertools import product @@ -232,7 +231,7 @@ def _sync_table(model, connection=None): except CQLEngineException as ex: # 1.2 doesn't return cf names, so we have to examine the exception # and ignore if it says the column family already exists - if "Cannot add already existing column family" not in six.text_type(ex): + if "Cannot add already existing column family" not in str(ex): raise else: log.debug(format_log_context("sync_table checking existing table %s", keyspace=ks_name, connection=connection), cf_name) @@ -477,7 +476,7 @@ def _update_options(model, connection=None): except KeyError: msg = format_log_context("Invalid table option: '%s'; known options: %s", keyspace=ks_name, connection=connection) 
raise KeyError(msg % (name, existing_options.keys())) - if isinstance(existing_value, six.string_types): + if isinstance(existing_value, str): if value != existing_value: update_options[name] = value else: diff --git a/cassandra/cqlengine/models.py b/cassandra/cqlengine/models.py index b3c7c9e37f..bc00001666 100644 --- a/cassandra/cqlengine/models.py +++ b/cassandra/cqlengine/models.py @@ -14,7 +14,6 @@ import logging import re -import six from warnings import warn from cassandra.cqlengine import CQLEngineException, ValidationError @@ -614,7 +613,7 @@ def __iter__(self): def __getitem__(self, key): """ Returns column's value. """ - if not isinstance(key, six.string_types): + if not isinstance(key, str): raise TypeError if key not in self._columns.keys(): raise KeyError @@ -622,7 +621,7 @@ def __getitem__(self, key): def __setitem__(self, key, val): """ Sets a column's value. """ - if not isinstance(key, six.string_types): + if not isinstance(key, str): raise TypeError if key not in self._columns.keys(): raise KeyError @@ -1042,8 +1041,7 @@ def _transform_column(col_name, col_obj): return klass -@six.add_metaclass(ModelMetaClass) -class Model(BaseModel): +class Model(BaseModel, metaclass=ModelMetaClass): __abstract__ = True """ *Optional.* Indicates that this model is only intended to be used as a base class for other models. diff --git a/cassandra/cqlengine/operators.py b/cassandra/cqlengine/operators.py index bba505583c..2adf51758d 100644 --- a/cassandra/cqlengine/operators.py +++ b/cassandra/cqlengine/operators.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import six from cassandra.cqlengine import UnicodeMixin @@ -44,8 +43,7 @@ def __init__(cls, name, bases, dct): super(OpMapMeta, cls).__init__(name, bases, dct) -@six.add_metaclass(OpMapMeta) -class BaseWhereOperator(BaseQueryOperator): +class BaseWhereOperator(BaseQueryOperator, metaclass=OpMapMeta): """ base operator used for where clauses """ @classmethod def get_operator(cls, symbol): diff --git a/cassandra/cqlengine/query.py b/cassandra/cqlengine/query.py index 11f664ec02..40134e884e 100644 --- a/cassandra/cqlengine/query.py +++ b/cassandra/cqlengine/query.py @@ -16,7 +16,6 @@ from datetime import datetime, timedelta from functools import partial import time -import six from warnings import warn from cassandra.query import SimpleStatement, BatchType as CBatchType, BatchStatement @@ -103,29 +102,29 @@ def in_(self, item): used where you'd typically want to use python's `in` operator """ - return WhereClause(six.text_type(self), InOperator(), item) + return WhereClause(str(self), InOperator(), item) def contains_(self, item): """ Returns a CONTAINS operator """ - return WhereClause(six.text_type(self), ContainsOperator(), item) + return WhereClause(str(self), ContainsOperator(), item) def __eq__(self, other): - return WhereClause(six.text_type(self), EqualsOperator(), self._to_database(other)) + return WhereClause(str(self), EqualsOperator(), self._to_database(other)) def __gt__(self, other): - return WhereClause(six.text_type(self), GreaterThanOperator(), self._to_database(other)) + return WhereClause(str(self), GreaterThanOperator(), self._to_database(other)) def __ge__(self, other): - return WhereClause(six.text_type(self), GreaterThanOrEqualOperator(), self._to_database(other)) + return WhereClause(str(self), GreaterThanOrEqualOperator(), self._to_database(other)) def 
__lt__(self, other): - return WhereClause(six.text_type(self), LessThanOperator(), self._to_database(other)) + return WhereClause(str(self), LessThanOperator(), self._to_database(other)) def __le__(self, other): - return WhereClause(six.text_type(self), LessThanOrEqualOperator(), self._to_database(other)) + return WhereClause(str(self), LessThanOrEqualOperator(), self._to_database(other)) class BatchType(object): @@ -231,7 +230,7 @@ def execute(self): opener = 'BEGIN ' + (str(batch_type) + ' ' if batch_type else '') + ' BATCH' if self.timestamp: - if isinstance(self.timestamp, six.integer_types): + if isinstance(self.timestamp, int): ts = self.timestamp elif isinstance(self.timestamp, (datetime, timedelta)): ts = self.timestamp @@ -407,7 +406,7 @@ def _execute(self, statement): return result def __unicode__(self): - return six.text_type(self._select_query()) + return str(self._select_query()) def __str__(self): return str(self.__unicode__()) @@ -604,7 +603,7 @@ def batch(self, batch_obj): def first(self): try: - return six.next(iter(self)) + return next(iter(self)) except StopIteration: return None @@ -901,7 +900,7 @@ def limit(self, v): if v is None: v = 0 - if not isinstance(v, six.integer_types): + if not isinstance(v, int): raise TypeError if v == self._limit: return self @@ -925,7 +924,7 @@ def fetch_size(self, v): print(user) """ - if not isinstance(v, six.integer_types): + if not isinstance(v, int): raise TypeError if v == self._fetch_size: return self diff --git a/cassandra/cqlengine/statements.py b/cassandra/cqlengine/statements.py index c6ceb16607..d92d0b2452 100644 --- a/cassandra/cqlengine/statements.py +++ b/cassandra/cqlengine/statements.py @@ -14,8 +14,6 @@ from datetime import datetime, timedelta import time -import six -from six.moves import filter from cassandra.query import FETCH_SIZE_UNSET from cassandra.cqlengine import columns @@ -114,7 +112,7 @@ def __init__(self, field, operator, value, quote_field=True): def __unicode__(self): field = ('"{0}"' if self.quote_field else '{0}').format(self.field) - return u'{0} {1} {2}'.format(field, self.operator, six.text_type(self.query_value)) + return u'{0} {1} {2}'.format(field, self.operator, str(self.query_value)) def __hash__(self): return super(WhereClause, self).__hash__() ^ hash(self.operator) @@ -186,8 +184,7 @@ def __init__(cls, name, bases, dct): super(ContainerUpdateTypeMapMeta, cls).__init__(name, bases, dct) -@six.add_metaclass(ContainerUpdateTypeMapMeta) -class ContainerUpdateClause(AssignmentClause): +class ContainerUpdateClause(AssignmentClause, metaclass=ContainerUpdateTypeMapMeta): def __init__(self, field, value, operation=None, previous=None): super(ContainerUpdateClause, self).__init__(field, value) @@ -563,7 +560,7 @@ def add_conditional_clause(self, clause): self.conditionals.append(clause) def _get_conditionals(self): - return 'IF {0}'.format(' AND '.join([six.text_type(c) for c in self.conditionals])) + return 'IF {0}'.format(' AND '.join([str(c) for c in self.conditionals])) def get_context_size(self): return len(self.get_context()) @@ -584,7 +581,7 @@ def timestamp_normalized(self): if not self.timestamp: return None - if isinstance(self.timestamp, six.integer_types): + if isinstance(self.timestamp, int): return self.timestamp if isinstance(self.timestamp, timedelta): @@ -602,7 +599,7 @@ def __repr__(self): @property def _where(self): - return 'WHERE {0}'.format(' AND '.join([six.text_type(c) for c in self.where_clauses])) + return 'WHERE {0}'.format(' AND '.join([str(c) for c in self.where_clauses])) 
class SelectStatement(BaseCQLStatement): @@ -629,10 +626,10 @@ def __init__(self, fetch_size=fetch_size ) - self.fields = [fields] if isinstance(fields, six.string_types) else (fields or []) + self.fields = [fields] if isinstance(fields, str) else (fields or []) self.distinct_fields = distinct_fields self.count = count - self.order_by = [order_by] if isinstance(order_by, six.string_types) else order_by + self.order_by = [order_by] if isinstance(order_by, str) else order_by self.limit = limit self.allow_filtering = allow_filtering @@ -653,7 +650,7 @@ def __unicode__(self): qs += [self._where] if self.order_by and not self.count: - qs += ['ORDER BY {0}'.format(', '.join(six.text_type(o) for o in self.order_by))] + qs += ['ORDER BY {0}'.format(', '.join(str(o) for o in self.order_by))] if self.limit: qs += ['LIMIT {0}'.format(self.limit)] @@ -798,7 +795,7 @@ def __unicode__(self): qs += ["USING {0}".format(" AND ".join(using_options))] qs += ['SET'] - qs += [', '.join([six.text_type(c) for c in self.assignments])] + qs += [', '.join([str(c) for c in self.assignments])] if self.where_clauses: qs += [self._where] @@ -849,7 +846,7 @@ def __init__(self, table, fields=None, where=None, timestamp=None, conditionals= conditionals=conditionals ) self.fields = [] - if isinstance(fields, six.string_types): + if isinstance(fields, str): fields = [fields] for field in fields or []: self.add_field(field) @@ -874,7 +871,7 @@ def get_context(self): return ctx def add_field(self, field): - if isinstance(field, six.string_types): + if isinstance(field, str): field = FieldDeleteClause(field) if not isinstance(field, BaseClause): raise StatementException("only instances of AssignmentClause can be added to statements") diff --git a/cassandra/cqlengine/usertype.py b/cassandra/cqlengine/usertype.py index 155068d99e..7fa85f1919 100644 --- a/cassandra/cqlengine/usertype.py +++ b/cassandra/cqlengine/usertype.py @@ -13,7 +13,6 @@ # limitations under the License. import re -import six from cassandra.util import OrderedDict from cassandra.cqlengine import CQLEngineException @@ -72,7 +71,7 @@ def __ne__(self, other): return not self.__eq__(other) def __str__(self): - return "{{{0}}}".format(', '.join("'{0}': {1}".format(k, getattr(self, k)) for k, v in six.iteritems(self._values))) + return "{{{0}}}".format(', '.join("'{0}': {1}".format(k, getattr(self, k)) for k, v in self._values.items())) def has_changed_fields(self): return any(v.changed for v in self._values.values()) @@ -93,14 +92,14 @@ def __getattr__(self, attr): raise AttributeError(attr) def __getitem__(self, key): - if not isinstance(key, six.string_types): + if not isinstance(key, str): raise TypeError if key not in self._fields.keys(): raise KeyError return getattr(self, key) def __setitem__(self, key, val): - if not isinstance(key, six.string_types): + if not isinstance(key, str): raise TypeError if key not in self._fields.keys(): raise KeyError @@ -198,8 +197,7 @@ def _transform_column(field_name, field_obj): return klass -@six.add_metaclass(UserTypeMetaClass) -class UserType(BaseUserType): +class UserType(BaseUserType, metaclass=UserTypeMetaClass): """ This class is used to model User Defined Types. 
To define a type, declare a class inheriting from this, and assign field types as class attributes: diff --git a/cassandra/cqltypes.py b/cassandra/cqltypes.py index c2c0d9f905..2daa1603a4 100644 --- a/cassandra/cqltypes.py +++ b/cassandra/cqltypes.py @@ -39,8 +39,6 @@ import re import socket import time -import six -from six.moves import range import struct import sys from uuid import UUID @@ -54,10 +52,7 @@ from cassandra import util _little_endian_flag = 1 # we always serialize LE -if six.PY3: - import ipaddress - -_ord = ord if six.PY2 else lambda x: x +import ipaddress apache_cassandra_type_prefix = 'org.apache.cassandra.db.marshal.' @@ -66,16 +61,12 @@ log = logging.getLogger(__name__) -if six.PY3: - _number_types = frozenset((int, float)) - long = int +_number_types = frozenset((int, float)) + - def _name_from_hex_string(encoded_name): - bin_str = unhexlify(encoded_name) - return bin_str.decode('ascii') -else: - _number_types = frozenset((int, long, float)) - _name_from_hex_string = unhexlify +def _name_from_hex_string(encoded_name): + bin_str = unhexlify(encoded_name) + return bin_str.decode('ascii') def trim_if_startswith(s, prefix): @@ -276,8 +267,7 @@ def __str__(self): EMPTY = EmptyValue() -@six.add_metaclass(CassandraTypeType) -class _CassandraType(object): +class _CassandraType(object, metaclass=CassandraTypeType): subtypes = () num_subtypes = 0 empty_binary_ok = False @@ -380,8 +370,6 @@ def apply_parameters(cls, subtypes, names=None): raise ValueError("%s types require %d subtypes (%d given)" % (cls.typename, cls.num_subtypes, len(subtypes))) newname = cls.cass_parameterized_type_with(subtypes) - if six.PY2 and isinstance(newname, unicode): - newname = newname.encode('utf-8') return type(newname, (cls,), {'subtypes': subtypes, 'cassname': cls.cassname, 'fieldnames': names}) @classmethod @@ -412,16 +400,10 @@ class _UnrecognizedType(_CassandraType): num_subtypes = 'UNKNOWN' -if six.PY3: - def mkUnrecognizedType(casstypename): - return CassandraTypeType(casstypename, - (_UnrecognizedType,), - {'typename': "'%s'" % casstypename}) -else: - def mkUnrecognizedType(casstypename): # noqa - return CassandraTypeType(casstypename.encode('utf8'), - (_UnrecognizedType,), - {'typename': "'%s'" % casstypename}) +def mkUnrecognizedType(casstypename): + return CassandraTypeType(casstypename, + (_UnrecognizedType,), + {'typename': "'%s'" % casstypename}) class BytesType(_CassandraType): @@ -430,7 +412,7 @@ class BytesType(_CassandraType): @staticmethod def serialize(val, protocol_version): - return six.binary_type(val) + return bytes(val) class DecimalType(_CassandraType): @@ -497,25 +479,20 @@ def serialize(byts, protocol_version): return int8_pack(byts) -if six.PY2: - class AsciiType(_CassandraType): - typename = 'ascii' - empty_binary_ok = True -else: - class AsciiType(_CassandraType): - typename = 'ascii' - empty_binary_ok = True +class AsciiType(_CassandraType): + typename = 'ascii' + empty_binary_ok = True - @staticmethod - def deserialize(byts, protocol_version): - return byts.decode('ascii') + @staticmethod + def deserialize(byts, protocol_version): + return byts.decode('ascii') - @staticmethod - def serialize(var, protocol_version): - try: - return var.encode('ascii') - except UnicodeDecodeError: - return var + @staticmethod + def serialize(var, protocol_version): + try: + return var.encode('ascii') + except UnicodeDecodeError: + return var class FloatType(_CassandraType): @@ -600,7 +577,7 @@ def serialize(addr, protocol_version): # since we've already determined the AF return 
socket.inet_aton(addr)
 except:
- if six.PY3 and isinstance(addr, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
+ if isinstance(addr, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
 return addr.packed
 raise ValueError("can't interpret %r as an inet address" % (addr,))
@@ -659,7 +636,7 @@ def serialize(v, protocol_version):
 raise TypeError('DateType arguments must be a datetime, date, or timestamp')
 timestamp = v
- return int64_pack(long(timestamp))
+ return int64_pack(int(timestamp))
 class TimestampType(DateType):
@@ -703,7 +680,7 @@ def serialize(val, protocol_version):
 try:
 days = val.days_from_epoch
 except AttributeError:
- if isinstance(val, six.integer_types):
+ if isinstance(val, int):
 # the DB wants offset int values, but util.Date init takes days from epoch
 # here we assume int values are offset, as they would appear in CQL
 # short circuit to avoid subtracting just to add offset
@@ -823,7 +800,7 @@ def deserialize_safe(cls, byts, protocol_version):
 @classmethod
 def serialize_safe(cls, items, protocol_version):
- if isinstance(items, six.string_types):
+ if isinstance(items, str):
 raise TypeError("Received a string for a type that expects a sequence")
 subtype, = cls.subtypes
@@ -900,7 +877,7 @@ def serialize_safe(cls, themap, protocol_version):
 buf = io.BytesIO()
 buf.write(pack(len(themap)))
 try:
- items = six.iteritems(themap)
+ items = themap.items()
 except AttributeError:
 raise TypeError("Got a non-map object for a map value")
 inner_proto = max(3, protocol_version)
@@ -981,9 +958,6 @@ class UserType(TupleType):
 def make_udt_class(cls, keyspace, udt_name, field_names, field_types):
 assert len(field_names) == len(field_types)
- if six.PY2 and isinstance(udt_name, unicode):
- udt_name = udt_name.encode('utf-8')
-
 instance = cls._cache.get((keyspace, udt_name))
 if not instance or instance.fieldnames != field_names or instance.subtypes != field_types:
 instance = type(udt_name, (cls,), {'subtypes': field_types,
@@ -998,8 +972,6 @@ def make_udt_class(cls, keyspace, udt_name, field_names, field_types):
 @classmethod
 def evict_udt_class(cls, keyspace, udt_name):
- if six.PY2 and isinstance(udt_name, unicode):
- udt_name = udt_name.encode('utf-8')
 try:
 del cls._cache[(keyspace, udt_name)]
 except KeyError:
@@ -1156,7 +1128,7 @@ def serialize_safe(cls, val, protocol_version):
 def is_counter_type(t):
- if isinstance(t, six.string_types):
+ if isinstance(t, str):
 t = lookup_casstype(t)
 return issubclass(t, CounterColumnType)
@@ -1192,7 +1164,7 @@ def serialize(val, protocol_version):
 @staticmethod
 def deserialize(byts, protocol_version):
- is_little_endian = bool(_ord(byts[0]))
+ is_little_endian = bool(byts[0])
 point = point_le if is_little_endian else point_be
 return util.Point(*point.unpack_from(byts, 5)) # ofs = endian byte + int type
@@ -1209,7 +1181,7 @@ def serialize(val, protocol_version):
 @staticmethod
 def deserialize(byts, protocol_version):
- is_little_endian = bool(_ord(byts[0]))
+ is_little_endian = bool(byts[0])
 point = point_le if is_little_endian else point_be
 coords = ((point.unpack_from(byts, offset) for offset in range(1 + 4 + 4, len(byts), point.size))) # start = endian + int type + int count
 return util.LineString(coords)
@@ -1238,7 +1210,7 @@ def serialize(val, protocol_version):
 @staticmethod
 def deserialize(byts, protocol_version):
- is_little_endian = bool(_ord(byts[0]))
+ is_little_endian = bool(byts[0])
 if is_little_endian:
 int_fmt = '<i'
[...]
diff --git a/cassandra/datastax/graph/graphson.py b/cassandra/datastax/graph/graphson.py
[...]
- if type(value) in six.integer_types and (value > MAX_INT32 or value < MIN_INT32):
+ if type(value) is int and (value > MAX_INT32 or value < MIN_INT32):
 return Int64TypeIO
 return
Int32TypeIO @@ -164,9 +158,7 @@ class Int64TypeIO(IntegerTypeIO): @classmethod def deserialize(cls, value, reader=None): - if six.PY3: - return value - return long(value) + return value class FloatTypeIO(GraphSONTypeIO): @@ -274,8 +266,7 @@ class BlobTypeIO(GraphSONTypeIO): @classmethod def serialize(cls, value, writer=None): value = base64.b64encode(value) - if six.PY3: - value = value.decode('utf-8') + value = value.decode('utf-8') return value @classmethod @@ -343,7 +334,7 @@ def deserialize(cls, value, reader=None): raise ValueError('Invalid duration: {0}'.format(value)) duration = {k: float(v) if v is not None else 0 - for k, v in six.iteritems(duration.groupdict())} + for k, v in duration.groupdict().items()} return datetime.timedelta(days=duration['days'], hours=duration['hours'], minutes=duration['minutes'], seconds=duration['seconds']) @@ -512,7 +503,7 @@ class JsonMapTypeIO(GraphSONTypeIO): @classmethod def serialize(cls, value, writer=None): out = {} - for k, v in six.iteritems(value): + for k, v in value.items(): out[k] = writer.serialize(v, writer) return out @@ -528,7 +519,7 @@ class MapTypeIO(GraphSONTypeIO): def definition(cls, value, writer=None): out = OrderedDict([('cqlType', cls.cql_type)]) out['definition'] = [] - for k, v in six.iteritems(value): + for k, v in value.items(): # we just need the first pair to write the def out['definition'].append(writer.definition(k)) out['definition'].append(writer.definition(v)) @@ -538,7 +529,7 @@ def definition(cls, value, writer=None): @classmethod def serialize(cls, value, writer=None): out = [] - for k, v in six.iteritems(value): + for k, v in value.items(): out.append(writer.serialize(k, writer)) out.append(writer.serialize(v, writer)) @@ -841,16 +832,10 @@ class GraphSON1Serializer(_BaseGraphSONSerializer): ]) -if ipaddress: - GraphSON1Serializer.register(ipaddress.IPv4Address, InetTypeIO) - GraphSON1Serializer.register(ipaddress.IPv6Address, InetTypeIO) - -if six.PY2: - GraphSON1Serializer.register(buffer, ByteBufferTypeIO) - GraphSON1Serializer.register(unicode, TextTypeIO) -else: - GraphSON1Serializer.register(memoryview, ByteBufferTypeIO) - GraphSON1Serializer.register(bytes, ByteBufferTypeIO) +GraphSON1Serializer.register(ipaddress.IPv4Address, InetTypeIO) +GraphSON1Serializer.register(ipaddress.IPv6Address, InetTypeIO) +GraphSON1Serializer.register(memoryview, ByteBufferTypeIO) +GraphSON1Serializer.register(bytes, ByteBufferTypeIO) class _BaseGraphSONDeserializer(object): @@ -922,9 +907,7 @@ def deserialize_int(cls, value): @classmethod def deserialize_bigint(cls, value): - if six.PY3: - return cls.deserialize_int(value) - return long(value) + return cls.deserialize_int(value) @classmethod def deserialize_double(cls, value): @@ -1007,8 +990,6 @@ def serialize(self, value, writer=None): GraphSON2Serializer.register(int, IntegerTypeIO) -if six.PY2: - GraphSON2Serializer.register(long, IntegerTypeIO) class GraphSON2Deserializer(_BaseGraphSONDeserializer): @@ -1055,7 +1036,7 @@ def deserialize(self, obj): except KeyError: pass # list and map are treated as normal json objs (could be isolated deserializers) - return {self.deserialize(k): self.deserialize(v) for k, v in six.iteritems(obj)} + return {self.deserialize(k): self.deserialize(v) for k, v in obj.items()} elif isinstance(obj, list): return [self.deserialize(o) for o in obj] else: @@ -1109,7 +1090,7 @@ def get_serializer(self, value): if self.user_types is None: try: user_types = self.context['cluster']._user_types[self.context['graph_name']] - self.user_types = 
dict(map(reversed, six.iteritems(user_types))) + self.user_types = dict(map(reversed, user_types.items())) except KeyError: self.user_types = {} diff --git a/cassandra/datastax/graph/query.py b/cassandra/datastax/graph/query.py index 7c0e265dbf..866df7a94c 100644 --- a/cassandra/datastax/graph/query.py +++ b/cassandra/datastax/graph/query.py @@ -15,8 +15,6 @@ import json from warnings import warn -import six - from cassandra import ConsistencyLevel from cassandra.query import Statement, SimpleStatement from cassandra.datastax.graph.types import Vertex, Edge, Path, VertexProperty @@ -77,7 +75,7 @@ def __init__(self, **kwargs): self._graph_options = {} kwargs.setdefault('graph_source', 'g') kwargs.setdefault('graph_language', GraphOptions.DEFAULT_GRAPH_LANGUAGE) - for attr, value in six.iteritems(kwargs): + for attr, value in kwargs.items(): if attr not in _graph_option_names: warn("Unknown keyword argument received for GraphOptions: {0}".format(attr)) setattr(self, attr, value) @@ -103,7 +101,7 @@ def get_options_map(self, other_options=None): for cl in ('graph-write-consistency', 'graph-read-consistency'): cl_enum = options.get(cl) if cl_enum is not None: - options[cl] = six.b(ConsistencyLevel.value_to_name[cl_enum]) + options[cl] = ConsistencyLevel.value_to_name[cl_enum].encode() return options def set_source_default(self): @@ -157,8 +155,8 @@ def get(self, key=opt[2]): def set(self, value, key=opt[2]): if value is not None: # normalize text here so it doesn't have to be done every time we get options map - if isinstance(value, six.text_type) and not isinstance(value, six.binary_type): - value = six.b(value) + if isinstance(value, str): + value = value.encode() self._graph_options[key] = value else: self._graph_options.pop(key, None) @@ -278,7 +276,7 @@ def __getattr__(self, attr): raise AttributeError("Result has no top-level attribute %r" % (attr,)) def __getitem__(self, item): - if isinstance(self.value, dict) and isinstance(item, six.string_types): + if isinstance(self.value, dict) and isinstance(item, str): return self.value[item] elif isinstance(self.value, list) and isinstance(item, int): return self.value[item] diff --git a/cassandra/datastax/insights/registry.py b/cassandra/datastax/insights/registry.py index 3dd1d255ae..03daebd86e 100644 --- a/cassandra/datastax/insights/registry.py +++ b/cassandra/datastax/insights/registry.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
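A side note on the pattern running through the hunks above: six.iteritems(d) existed because Python 2's d.items() built a full list. On Python 3, d.items() already returns a lightweight view, so the one-to-one rewrite is safe. A minimal sketch (the option names echo GraphOptions above; the values are illustrative):

    opts = {'graph_source': b'g', 'graph_language': b'gremlin-groovy'}
    # py2/six:  for attr, value in six.iteritems(opts): ...
    for attr, value in opts.items():   # py3: a live view, no copied list
        print(attr, value)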
-import six from collections import OrderedDict from warnings import warn @@ -59,7 +58,7 @@ def _get_serializer(self, cls): try: return self._mapping_dict[cls] except KeyError: - for registered_cls, serializer in six.iteritems(self._mapping_dict): + for registered_cls, serializer in self._mapping_dict.items(): if issubclass(cls, registered_cls): return self._mapping_dict[registered_cls] raise ValueError diff --git a/cassandra/datastax/insights/reporter.py b/cassandra/datastax/insights/reporter.py index b05a88deb0..83205fc458 100644 --- a/cassandra/datastax/insights/reporter.py +++ b/cassandra/datastax/insights/reporter.py @@ -24,7 +24,6 @@ import sys from threading import Event, Thread import time -import six from cassandra.policies import HostDistance from cassandra.util import ms_timestamp_from_datetime @@ -199,9 +198,9 @@ def _get_startup_data(self): }, 'platformInfo': { 'os': { - 'name': uname_info.system if six.PY3 else uname_info[0], - 'version': uname_info.release if six.PY3 else uname_info[2], - 'arch': uname_info.machine if six.PY3 else uname_info[4] + 'name': uname_info.system, + 'version': uname_info.release, + 'arch': uname_info.machine }, 'cpus': { 'length': multiprocessing.cpu_count(), diff --git a/cassandra/datastax/insights/serializers.py b/cassandra/datastax/insights/serializers.py index aec4467a6a..289c165e8a 100644 --- a/cassandra/datastax/insights/serializers.py +++ b/cassandra/datastax/insights/serializers.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six - def initialize_registry(insights_registry): # This will be called from the cluster module, so we put all this behavior @@ -203,8 +201,8 @@ def graph_options_insights_serializer(options): 'language': options.graph_language, 'graphProtocol': options.graph_protocol } - updates = {k: v.decode('utf-8') for k, v in six.iteritems(rv) - if isinstance(v, six.binary_type)} + updates = {k: v.decode('utf-8') for k, v in rv.items() + if isinstance(v, bytes)} rv.update(updates) return rv diff --git a/cassandra/deserializers.pyx b/cassandra/deserializers.pyx index 7de6949099..7c256674b0 100644 --- a/cassandra/deserializers.pyx +++ b/cassandra/deserializers.pyx @@ -29,8 +29,6 @@ from uuid import UUID from cassandra import cqltypes from cassandra import util -cdef bint PY2 = six.PY2 - cdef class Deserializer: """Cython-based deserializer class for a cqltype""" @@ -90,8 +88,6 @@ cdef class DesAsciiType(Deserializer): cdef deserialize(self, Buffer *buf, int protocol_version): if buf.size == 0: return "" - if PY2: - return to_bytes(buf) return to_bytes(buf).decode('ascii') diff --git a/cassandra/encoder.py b/cassandra/encoder.py index f2c3f8dfed..188739b00f 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -27,17 +27,11 @@ import sys import types from uuid import UUID -import six +import ipaddress from cassandra.util import (OrderedDict, OrderedMap, OrderedMapSerializedKey, sortedset, Time, Date, Point, LineString, Polygon) -if six.PY3: - import ipaddress - -if six.PY3: - long = int - def cql_quote(term): # The ordering of this method is important for the result of this method to @@ -45,10 +39,6 @@ def cql_quote(term): if isinstance(term, str): return "'%s'" % str(term).replace("'", "''") - # This branch of the if statement will only be used by Python 2 to catch - # unicode strings, text_type is used to prevent type errors with Python 3. 
- elif isinstance(term, six.text_type): - return "'%s'" % term.encode('utf8').replace("'", "''") else: return str(term) @@ -97,21 +87,13 @@ def __init__(self): Polygon: self.cql_encode_str_quoted } - if six.PY2: - self.mapping.update({ - unicode: self.cql_encode_unicode, - buffer: self.cql_encode_bytes, - long: self.cql_encode_object, - types.NoneType: self.cql_encode_none, - }) - else: - self.mapping.update({ - memoryview: self.cql_encode_bytes, - bytes: self.cql_encode_bytes, - type(None): self.cql_encode_none, - ipaddress.IPv4Address: self.cql_encode_ipaddress, - ipaddress.IPv6Address: self.cql_encode_ipaddress - }) + self.mapping.update({ + memoryview: self.cql_encode_bytes, + bytes: self.cql_encode_bytes, + type(None): self.cql_encode_none, + ipaddress.IPv4Address: self.cql_encode_ipaddress, + ipaddress.IPv6Address: self.cql_encode_ipaddress + }) def cql_encode_none(self, val): """ @@ -134,16 +116,8 @@ def cql_encode_str(self, val): def cql_encode_str_quoted(self, val): return "'%s'" % val - if six.PY3: - def cql_encode_bytes(self, val): - return (b'0x' + hexlify(val)).decode('utf-8') - elif sys.version_info >= (2, 7): - def cql_encode_bytes(self, val): # noqa - return b'0x' + hexlify(val) - else: - # python 2.6 requires string or read-only buffer for hexlify - def cql_encode_bytes(self, val): # noqa - return b'0x' + hexlify(buffer(val)) + def cql_encode_bytes(self, val): + return (b'0x' + hexlify(val)).decode('utf-8') def cql_encode_object(self, val): """ @@ -169,7 +143,7 @@ def cql_encode_datetime(self, val): with millisecond precision. """ timestamp = calendar.timegm(val.utctimetuple()) - return str(long(timestamp * 1e3 + getattr(val, 'microsecond', 0) / 1e3)) + return str(int(timestamp * 1e3 + getattr(val, 'microsecond', 0) / 1e3)) def cql_encode_date(self, val): """ @@ -214,7 +188,7 @@ def cql_encode_map_collection(self, val): return '{%s}' % ', '.join('%s: %s' % ( self.mapping.get(type(k), self.cql_encode_object)(k), self.mapping.get(type(v), self.cql_encode_object)(v) - ) for k, v in six.iteritems(val)) + ) for k, v in val.items()) def cql_encode_list_collection(self, val): """ @@ -236,14 +210,13 @@ def cql_encode_all_types(self, val, as_text_type=False): if :attr:`~Encoder.mapping` does not contain an entry for the type. """ encoded = self.mapping.get(type(val), self.cql_encode_object)(val) - if as_text_type and not isinstance(encoded, six.text_type): + if as_text_type and not isinstance(encoded, str): return encoded.decode('utf-8') return encoded - if six.PY3: - def cql_encode_ipaddress(self, val): - """ - Converts an ipaddress (IPV4Address, IPV6Address) to a CQL string. This - is suitable for ``inet`` type columns. - """ - return "'%s'" % val.compressed + def cql_encode_ipaddress(self, val): + """ + Converts an ipaddress (IPV4Address, IPV6Address) to a CQL string. This + is suitable for ``inet`` type columns. 
+ """ + return "'%s'" % val.compressed diff --git a/cassandra/io/asyncorereactor.py b/cassandra/io/asyncorereactor.py index 95b2e1aa42..c62d7fa70e 100644 --- a/cassandra/io/asyncorereactor.py +++ b/cassandra/io/asyncorereactor.py @@ -24,7 +24,6 @@ import sys import ssl -from six.moves import range try: from weakref import WeakSet diff --git a/cassandra/io/eventletreactor.py b/cassandra/io/eventletreactor.py index 162661f468..42874036d5 100644 --- a/cassandra/io/eventletreactor.py +++ b/cassandra/io/eventletreactor.py @@ -23,8 +23,6 @@ from threading import Event import time -from six.moves import xrange - from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager try: from eventlet.green.OpenSSL import SSL @@ -190,5 +188,5 @@ def handle_read(self): def push(self, data): chunk_size = self.out_buffer_size - for i in xrange(0, len(data), chunk_size): + for i in range(0, len(data), chunk_size): self._write_queue.put(data[i:i + chunk_size]) diff --git a/cassandra/io/geventreactor.py b/cassandra/io/geventreactor.py index ebc664d485..4f1f158aa7 100644 --- a/cassandra/io/geventreactor.py +++ b/cassandra/io/geventreactor.py @@ -20,7 +20,6 @@ import logging import time -from six.moves import range from cassandra.connection import Connection, ConnectionShutdown, Timer, TimerManager diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py index f4908f49fb..02a374cc91 100644 --- a/cassandra/io/libevreactor.py +++ b/cassandra/io/libevreactor.py @@ -21,7 +21,6 @@ from threading import Lock, Thread import time -from six.moves import range from cassandra.connection import (Connection, ConnectionShutdown, NONBLOCKING, Timer, TimerManager) diff --git a/cassandra/marshal.py b/cassandra/marshal.py index 43cb627b08..726f0819eb 100644 --- a/cassandra/marshal.py +++ b/cassandra/marshal.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import six import struct @@ -45,35 +44,16 @@ def _make_packer(format_string): v3_header_unpack = v3_header_struct.unpack -if six.PY3: - def byte2int(b): - return b - - - def varint_unpack(term): - val = int(''.join("%02x" % i for i in term), 16) - if (term[0] & 128) != 0: - len_term = len(term) # pulling this out of the expression to avoid overflow in cython optimized code - val -= 1 << (len_term * 8) - return val -else: - def byte2int(b): - return ord(b) - - - def varint_unpack(term): # noqa - val = int(term.encode('hex'), 16) - if (ord(term[0]) & 128) != 0: - len_term = len(term) # pulling this out of the expression to avoid overflow in cython optimized code - val = val - (1 << (len_term * 8)) - return val +def varint_unpack(term): + val = int(''.join("%02x" % i for i in term), 16) + if (term[0] & 128) != 0: + len_term = len(term) # pulling this out of the expression to avoid overflow in cython optimized code + val -= 1 << (len_term * 8) + return val def bit_length(n): - if six.PY3 or isinstance(n, int): - return int.bit_length(n) - else: - return long.bit_length(n) + return int.bit_length(n) def varint_pack(big): @@ -91,7 +71,7 @@ def varint_pack(big): if pos and revbytes[-1] & 0x80: revbytes.append(0) revbytes.reverse() - return six.binary_type(revbytes) + return bytes(revbytes) point_be = struct.Struct('>dd') @@ -113,7 +93,7 @@ def vints_unpack(term): # noqa values = [] n = 0 while n < len(term): - first_byte = byte2int(term[n]) + first_byte = term[n] if (first_byte & 128) == 0: val = first_byte @@ -124,7 +104,7 @@ def vints_unpack(term): # noqa while n < end: n += 1 val <<= 8 - val |= byte2int(term[n]) & 0xff + val |= term[n] & 0xff n += 1 values.append(decode_zig_zag(val)) @@ -162,4 +142,4 @@ def vints_pack(values): revbytes.append(abs(v)) revbytes.reverse() - return six.binary_type(revbytes) + return bytes(revbytes) diff --git a/cassandra/metadata.py b/cassandra/metadata.py index 9ef24b981d..d30e6a1925 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -15,13 +15,12 @@ from binascii import unhexlify from bisect import bisect_left from collections import defaultdict +from collections.abc import Mapping from functools import total_ordering from hashlib import md5 import json import logging import re -import six -from six.moves import zip import sys from threading import RLock import struct @@ -43,7 +42,6 @@ from cassandra.util import OrderedDict, Version from cassandra.pool import HostDistance from cassandra.connection import EndPoint -from cassandra.compat import Mapping from cassandra.tablets import Tablets log = logging.getLogger(__name__) @@ -296,7 +294,7 @@ def rebuild_token_map(self, partitioner, token_map): token_to_host_owner = {} ring = [] - for host, token_strings in six.iteritems(token_map): + for host, token_strings in token_map.items(): for token_string in token_strings: token = token_class.from_string(token_string) ring.append(token) @@ -377,7 +375,7 @@ def get_host_by_host_id(self, host_id): return self._hosts.get(host_id) def _get_host_by_address(self, address, port=None): - for host in six.itervalues(self._hosts): + for host in self._hosts.values(): if (host.broadcast_rpc_address == address and (port is None or host.broadcast_rpc_port is None or host.broadcast_rpc_port == port)): return host @@ -418,8 +416,7 @@ def __new__(metacls, name, bases, dct): -@six.add_metaclass(ReplicationStrategyTypeType) -class _ReplicationStrategy(object): +class _ReplicationStrategy(object, metaclass=ReplicationStrategyTypeType): options_map = None @classmethod @@ -658,7 
+655,7 @@ def make_token_replica_map(self, token_to_host_owner, ring):
 racks_this_dc = dc_racks[dc]
 hosts_this_dc = len(hosts_per_dc[dc])
- for token_offset_index in six.moves.range(index, index+num_tokens):
+ for token_offset_index in range(index, index+num_tokens):
 if token_offset_index >= len(token_offsets):
 token_offset_index = token_offset_index - len(token_offsets)
@@ -885,7 +882,7 @@ def _add_table_metadata(self, table_metadata):
 # note the intentional order of add before remove
 # this makes sure the maps are never absent something that existed before this update
- for index_name, index_metadata in six.iteritems(table_metadata.indexes):
+ for index_name, index_metadata in table_metadata.indexes.items():
 self.indexes[index_name] = index_metadata
 for index_name in (n for n in old_indexes if n not in table_metadata.indexes):
@@ -1372,7 +1369,7 @@ def _all_as_cql(self):
 if self.extensions:
 registry = _RegisteredExtensionType._extension_registry
- for k in six.viewkeys(registry) & self.extensions: # no viewkeys on OrderedMapSerializeKey
+ for k in registry.keys() & self.extensions: # no viewkeys on OrderedMapSerializeKey
 ext = registry[k]
 cql = ext.after_table_cql(self, k, self.extensions[k])
 if cql:
@@ -1588,8 +1585,7 @@ def __new__(mcs, name, bases, dct):
 return cls
-@six.add_metaclass(_RegisteredExtensionType)
-class RegisteredTableExtension(TableExtensionInterface):
+class RegisteredTableExtension(TableExtensionInterface, metaclass=_RegisteredExtensionType):
 """
 Extending this class registers it by name (associated by key in the `system_schema.tables.extensions` map).
 """
@@ -1895,7 +1891,7 @@ class MD5Token(HashToken):
 @classmethod
 def hash_fn(cls, key):
- if isinstance(key, six.text_type):
+ if isinstance(key, str):
 key = key.encode('UTF-8')
 return abs(varint_unpack(md5(key).digest()))
@@ -1909,7 +1905,7 @@ class BytesToken(Token):
 def from_string(cls, token_string):
 """ `token_string` should be the string representation from the server. """
 # unhexlify works fine with unicode input in everything but pypy3, where it raises "TypeError: 'str' does not support the buffer interface"
- if isinstance(token_string, six.text_type):
+ if isinstance(token_string, str):
 token_string = token_string.encode('ascii') # The BOP stores a hex string
 return cls(unhexlify(token_string))
@@ -3054,17 +3050,17 @@ def _build_table_graph_metadata(table_meta):
 try:
 # Make sure we process vertices before edges
- for table_meta in [t for t in six.itervalues(keyspace_meta.tables)
+ for table_meta in [t for t in keyspace_meta.tables.values()
 if t.name in self.keyspace_table_vertex_rows[keyspace_meta.name]]:
 _build_table_graph_metadata(table_meta)
 # all other tables...
- for table_meta in [t for t in six.itervalues(keyspace_meta.tables) + for table_meta in [t for t in keyspace_meta.tables.values() if t.name not in self.keyspace_table_vertex_rows[keyspace_meta.name]]: _build_table_graph_metadata(table_meta) except Exception: # schema error, remove all graph metadata for this keyspace - for t in six.itervalues(keyspace_meta.tables): + for t in keyspace_meta.tables.values(): t.edge = t.vertex = None keyspace_meta._exc_info = sys.exc_info() log.exception("Error while parsing graph metadata for keyspace %s", keyspace_meta.name) @@ -3278,7 +3274,7 @@ def as_cql_query(self, formatted=False): if self.extensions: registry = _RegisteredExtensionType._extension_registry - for k in six.viewkeys(registry) & self.extensions: # no viewkeys on OrderedMapSerializeKey + for k in registry.keys() & self.extensions: # no viewkeys on OrderedMapSerializeKey ext = registry[k] cql = ext.after_table_cql(self, k, self.extensions[k]) if cql: diff --git a/cassandra/murmur3.py b/cassandra/murmur3.py index 7c8d641b32..282c43578d 100644 --- a/cassandra/murmur3.py +++ b/cassandra/murmur3.py @@ -1,4 +1,3 @@ -from six.moves import range import struct diff --git a/cassandra/protocol.py b/cassandra/protocol.py index b1ab4707db..53a4938d0d 100644 --- a/cassandra/protocol.py +++ b/cassandra/protocol.py @@ -18,8 +18,6 @@ import socket from uuid import UUID -import six -from six.moves import range import io from cassandra import OperationType, ProtocolVersion @@ -85,8 +83,7 @@ def __init__(cls, name, bases, dct): register_class(cls) -@six.add_metaclass(_RegisterMessageType) -class _MessageType(object): +class _MessageType(object, metaclass=_RegisterMessageType): tracing = False custom_payload = None @@ -139,8 +136,6 @@ def recv_body(cls, f, protocol_version, protocol_features, *args): def summary_msg(self): msg = 'Error from server: code=%04x [%s] message="%s"' \ % (self.code, self.summary, self.message) - if six.PY2 and isinstance(msg, six.text_type): - msg = msg.encode('utf-8') return msg def __str__(self): @@ -161,8 +156,7 @@ def __init__(cls, name, bases, dct): error_classes[cls.error_code] = cls -@six.add_metaclass(ErrorMessageSubclass) -class ErrorMessageSub(ErrorMessage): +class ErrorMessageSub(ErrorMessage, metaclass=ErrorMessageSubclass): error_code = None @@ -1362,7 +1356,7 @@ def read_binary_string(f): def write_string(f, s): - if isinstance(s, six.text_type): + if isinstance(s, str): s = s.encode('utf8') write_short(f, len(s)) f.write(s) @@ -1379,7 +1373,7 @@ def read_longstring(f): def write_longstring(f, s): - if isinstance(s, six.text_type): + if isinstance(s, str): s = s.encode('utf8') write_int(f, len(s)) f.write(s) diff --git a/cassandra/query.py b/cassandra/query.py index a15aadb629..bd8ccd888d 100644 --- a/cassandra/query.py +++ b/cassandra/query.py @@ -23,8 +23,6 @@ import re import struct import time -import six -from six.moves import range, zip import warnings from cassandra import ConsistencyLevel, OperationTimedOut @@ -814,7 +812,7 @@ def add(self, statement, parameters=None): Like with other statements, parameters must be a sequence, even if there is only one item. 
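For example (a minimal sketch; the table and values are hypothetical):

    batch = BatchStatement()
    batch.add(SimpleStatement("INSERT INTO users (id, name) VALUES (%s, %s)"), (1, 'alice'))
    # a single parameter must still be wrapped in a sequence
    batch.add("DELETE FROM users WHERE id = %s", (2,))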
""" - if isinstance(statement, six.string_types): + if isinstance(statement, str): if parameters: encoder = Encoder() if self._session is None else self._session.encoder statement = bind_params(statement, parameters, encoder) @@ -898,10 +896,8 @@ def __str__(self): def bind_params(query, params, encoder): - if six.PY2 and isinstance(query, six.text_type): - query = query.encode('utf-8') if isinstance(params, dict): - return query % dict((k, encoder.cql_encode_all_types(v)) for k, v in six.iteritems(params)) + return query % dict((k, encoder.cql_encode_all_types(v)) for k, v in params.items()) else: return query % tuple(encoder.cql_encode_all_types(v) for v in params) diff --git a/cassandra/scylla/cloud.py b/cassandra/scylla/cloud.py index 3ddce06bf1..c3298b199a 100644 --- a/cassandra/scylla/cloud.py +++ b/cassandra/scylla/cloud.py @@ -20,7 +20,6 @@ from contextlib import contextmanager from itertools import islice -import six import yaml from cassandra.connection import SniEndPointFactory @@ -105,7 +104,7 @@ def create_ssl_context(self): for data_center in self.data_centers.values(): with file_or_memory(path=data_center.get('certificateAuthorityPath'), data=data_center.get('certificateAuthorityData')) as cafile: - ssl_context.load_verify_locations(cadata=six.text_type(open(cafile).read())) + ssl_context.load_verify_locations(cadata=open(cafile).read()) with file_or_memory(path=self.auth_info.get('clientCertificatePath'), data=self.auth_info.get('clientCertificateData')) as certfile, \ file_or_memory(path=self.auth_info.get('clientKeyPath'), data=self.auth_info.get('clientKeyData')) as keyfile: @@ -118,13 +117,10 @@ def create_pyopenssl_context(self): try: from OpenSSL import SSL except ImportError as e: - six.reraise( - ImportError, - ImportError( - "PyOpenSSL must be installed to connect to scylla-cloud with the Eventlet or Twisted event loops"), - sys.exc_info()[2] - ) - ssl_context = SSL.Context(SSL.TLS_CLIENT_METHOD) + raise ImportError( + "PyOpenSSL must be installed to connect to scylla-cloud with the Eventlet or Twisted event loops") \ + .with_traceback(e.__traceback__) + ssl_context = SSL.Context(SSL.TLS_METHOD) ssl_context.set_verify(SSL.VERIFY_PEER, callback=lambda _1, _2, _3, _4, ok: True if self.skip_tls_verify else ok) for data_center in self.data_centers.values(): with file_or_memory(path=data_center.get('certificateAuthorityPath'), diff --git a/cassandra/segment.py b/cassandra/segment.py index e3881c4402..78161fe520 100644 --- a/cassandra/segment.py +++ b/cassandra/segment.py @@ -13,7 +13,6 @@ # limitations under the License. import zlib -import six from cassandra import DriverException from cassandra.marshal import int32_pack @@ -54,9 +53,6 @@ def compute_crc24(data, length): def compute_crc32(data, value): crc32 = zlib.crc32(data, value) - if six.PY2: - crc32 &= 0xffffffff - return crc32 diff --git a/cassandra/util.py b/cassandra/util.py index dd5c58b01d..3109dafa4c 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -14,13 +14,14 @@ from __future__ import with_statement import calendar +from collections.abc import Mapping import datetime from functools import total_ordering import logging from itertools import chain +import pickle import random import re -import six import uuid import sys @@ -789,10 +790,6 @@ def _find_insertion(self, x): sortedset = SortedSet # backwards-compatibility -from cassandra.compat import Mapping -from six.moves import cPickle - - class OrderedMap(Mapping): ''' An ordered map that accepts non-hashable types for keys. 
It also maintains the @@ -835,7 +832,7 @@ def __init__(self, *args, **kwargs): for k, v in e: self._insert(k, v) - for k, v in six.iteritems(kwargs): + for k, v in kwargs.items(): self._insert(k, v) def _insert(self, key, value): @@ -901,7 +898,7 @@ def popitem(self): raise KeyError() def _serialize_key(self, key): - return cPickle.dumps(key) + return pickle.dumps(key) class OrderedMapSerializedKey(OrderedMap): @@ -922,9 +919,6 @@ def _serialize_key(self, key): import datetime import time -if six.PY3: - long = int - @total_ordering class Time(object): @@ -951,11 +945,11 @@ def __init__(self, value): - datetime.time: built-in time - string_type: a string time of the form "HH:MM:SS[.mmmuuunnn]" """ - if isinstance(value, six.integer_types): + if isinstance(value, int): self._from_timestamp(value) elif isinstance(value, datetime.time): self._from_time(value) - elif isinstance(value, six.string_types): + elif isinstance(value, str): self._from_timestring(value) else: raise TypeError('Time arguments must be a whole number, datetime.time, or string') @@ -1031,7 +1025,7 @@ def __eq__(self, other): if isinstance(other, Time): return self.nanosecond_time == other.nanosecond_time - if isinstance(other, six.integer_types): + if isinstance(other, int): return self.nanosecond_time == other return self.nanosecond_time % Time.MICRO == 0 and \ @@ -1080,11 +1074,11 @@ def __init__(self, value): - datetime.date: built-in date - string_type: a string time of the form "yyyy-mm-dd" """ - if isinstance(value, six.integer_types): + if isinstance(value, int): self.days_from_epoch = value elif isinstance(value, (datetime.date, datetime.datetime)): self._from_timetuple(value.timetuple()) - elif isinstance(value, six.string_types): + elif isinstance(value, str): self._from_datestring(value) else: raise TypeError('Date arguments must be a whole number, datetime.date, or string') @@ -1124,7 +1118,7 @@ def __eq__(self, other): if isinstance(other, Date): return self.days_from_epoch == other.days_from_epoch - if isinstance(other, six.integer_types): + if isinstance(other, int): return self.days_from_epoch == other try: @@ -1688,7 +1682,7 @@ def __init__(self, value, precision): if value is None: milliseconds = None - elif isinstance(value, six.integer_types): + elif isinstance(value, int): milliseconds = value elif isinstance(value, datetime.datetime): value = value.replace( @@ -1956,12 +1950,10 @@ def __init__(self, version): try: self.major = int(parts.pop()) - except ValueError: - six.reraise( - ValueError, - ValueError("Couldn't parse version {}. Version should start with a number".format(version)), - sys.exc_info()[2] - ) + except ValueError as e: + raise ValueError( + "Couldn't parse version {}. 
Version should start with a number".format(version))\
 .with_traceback(e.__traceback__)
 try:
 self.minor = int(parts.pop()) if parts else 0
 self.patch = int(parts.pop()) if parts else 0
@@ -1994,8 +1986,8 @@ def __str__(self):
 @staticmethod
 def _compare_version_part(version, other_version, cmp):
- if not (isinstance(version, six.integer_types) and
- isinstance(other_version, six.integer_types)):
+ if not (isinstance(version, int) and
+ isinstance(other_version, int)):
 version = str(version)
 other_version = str(other_version)
diff --git a/docs/installation.rst b/docs/installation.rst
index 4996a02c1b..64e00c8c40 100644
--- a/docs/installation.rst
+++ b/docs/installation.rst
@@ -188,7 +188,7 @@ If your sudo configuration does not allow SETENV, you must push the option flag
 applies these options to all dependencies (which break on the custom flag). Therefore,
 you must first install dependencies, then use install-option::
- sudo pip install six futures
+ sudo pip install futures
 sudo pip install --install-option="--no-cython"
diff --git a/examples/concurrent_executions/execute_async_with_queue.py b/examples/concurrent_executions/execute_async_with_queue.py
index 60d2a69c3c..72d2c101cb 100644
--- a/examples/concurrent_executions/execute_async_with_queue.py
+++ b/examples/concurrent_executions/execute_async_with_queue.py
@@ -19,7 +19,7 @@ import time
 import uuid
-from six.moves import queue
+import queue
 from cassandra.cluster import Cluster
diff --git a/requirements.txt b/requirements.txt
index 732bba1018..100a12905a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1 @@
 geomet>=0.1,<0.3
-six >=1.9
diff --git a/setup.py b/setup.py
index 791c8923da..7b30dff022 100644
--- a/setup.py
+++ b/setup.py
@@ -417,8 +417,7 @@ def run_setup(extensions):
 else:
 sys.stderr.write("Bypassing Cython setup requirement\n")
- dependencies = ['six >=1.9',
- 'geomet>=0.1,<0.3',
+ dependencies = ['geomet>=0.1,<0.3',
 'pyyaml > 5.0']
 _EXTRAS_REQUIRE = {
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
index 7826f4bcf9..54358d79b4 100644
--- a/tests/integration/__init__.py
+++ b/tests/integration/__init__.py
@@ -32,7 +32,6 @@ from threading import Event
 from subprocess import call
 from itertools import groupby
-import six
 import shutil
 import pytest
@@ -676,7 +675,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None,
 if os.name == "nt":
 if CCM_CLUSTER:
- for node in six.itervalues(CCM_CLUSTER.nodes):
+ for node in CCM_CLUSTER.nodes.values():
 os.system("taskkill /F /PID " + str(node.pid))
 else:
 call(["pkill", "-9", "-f", ".ccm"])
diff --git a/tests/integration/advanced/__init__.py b/tests/integration/advanced/__init__.py
index e2fa1a4a4a..dffaccd190 100644
--- a/tests/integration/advanced/__init__.py
+++ b/tests/integration/advanced/__init__.py
@@ -14,7 +14,7 @@
 import unittest
-from six.moves.urllib.request import build_opener, Request, HTTPHandler
+from urllib.request import build_opener, Request, HTTPHandler
 import re
 import os
 import time
diff --git a/tests/integration/advanced/graph/__init__.py b/tests/integration/advanced/graph/__init__.py
index 6c9458dd02..91c9287e11 100644
--- a/tests/integration/advanced/graph/__init__.py
+++ b/tests/integration/advanced/graph/__init__.py
@@ -22,7 +22,6 @@
 import datetime
 from cassandra.util import Point, LineString, Polygon, Duration
-import six
 from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT
 from cassandra.cluster import GraphAnalyticsExecutionProfile,
GraphExecutionProfile, EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT, \ @@ -457,15 +456,11 @@ def datatypes(): "duration1": ["Duration()", datetime.timedelta(1, 16, 0), GraphSON1Deserializer.deserialize_duration], "duration2": ["Duration()", datetime.timedelta(days=1, seconds=16, milliseconds=15), - GraphSON1Deserializer.deserialize_duration] + GraphSON1Deserializer.deserialize_duration], + "blob3": ["Blob()", bytes(b"Hello World Again"), GraphSON1Deserializer.deserialize_blob], + "blob4": ["Blob()", memoryview(b"And Again Hello World"), GraphSON1Deserializer.deserialize_blob] } - if six.PY2: - data["blob2"] = ["Blob()", buffer(b"Hello World"), GraphSON1Deserializer.deserialize_blob] - else: - data["blob3"] = ["Blob()", bytes(b"Hello World Again"), GraphSON1Deserializer.deserialize_blob] - data["blob4"] = ["Blob()", memoryview(b"And Again Hello World"), GraphSON1Deserializer.deserialize_blob] - if DSE_VERSION >= Version("5.1"): data["time1"] = ["Time()", datetime.time(12, 6, 12, 444), GraphSON1Deserializer.deserialize_time] data["time2"] = ["Time()", datetime.time(12, 6, 12), GraphSON1Deserializer.deserialize_time] @@ -965,7 +960,7 @@ def generate_tests(cls, schema=None, graphson=None, traversal=False): """Generate tests for a graph configuration""" def decorator(klass): if DSE_VERSION: - predicate = inspect.ismethod if six.PY2 else inspect.isfunction + predicate = inspect.isfunction for name, func in inspect.getmembers(klass, predicate=predicate): if not name.startswith('_test'): continue @@ -984,7 +979,7 @@ def generate_schema_tests(cls, schema=None): """Generate schema tests for a graph configuration""" def decorator(klass): if DSE_VERSION: - predicate = inspect.ismethod if six.PY2 else inspect.isfunction + predicate = inspect.isfunction for name, func in inspect.getmembers(klass, predicate=predicate): if not name.startswith('_test'): continue @@ -1026,7 +1021,7 @@ def __init__(self, properties): @property def non_pk_properties(self): - return {p: v for p, v in six.iteritems(self.properties) if p != 'pkid'} + return {p: v for p, v in self.properties.items() if p != 'pkid'} class GraphSchema(object): @@ -1134,7 +1129,7 @@ def clear(session): @classmethod def create_vertex_label(cls, session, vertex_label, execution_profile=EXEC_PROFILE_GRAPH_DEFAULT): statements = ["schema.propertyKey('pkid').Int().ifNotExists().create();"] - for k, v in six.iteritems(vertex_label.non_pk_properties): + for k, v in vertex_label.non_pk_properties.items(): typ = cls.sanitize_type(v) statements.append("schema.propertyKey('{name}').{type}.create();".format( name=k, type=typ @@ -1142,7 +1137,7 @@ def create_vertex_label(cls, session, vertex_label, execution_profile=EXEC_PROFI statements.append("schema.vertexLabel('{label}').partitionKey('pkid').properties(".format( label=vertex_label.label)) - property_names = [name for name in six.iterkeys(vertex_label.non_pk_properties)] + property_names = [name for name in vertex_label.non_pk_properties.keys()] statements.append(", ".join(["'{}'".format(p) for p in property_names])) statements.append(").create();") @@ -1189,7 +1184,7 @@ def create_vertex_label(cls, session, vertex_label, execution_profile=EXEC_PROFI statements = ["schema.vertexLabel('{label}').partitionBy('pkid', Int)".format( label=vertex_label.label)] - for name, typ in six.iteritems(vertex_label.non_pk_properties): + for name, typ in vertex_label.non_pk_properties.items(): typ = cls.sanitize_type(typ) statements.append(".property('{name}', {type})".format(name=name, type=typ)) statements.append(".create();") diff 
--git a/tests/integration/advanced/graph/fluent/__init__.py b/tests/integration/advanced/graph/fluent/__init__.py
index 3962029f45..bde726c297 100644
--- a/tests/integration/advanced/graph/fluent/__init__.py
+++ b/tests/integration/advanced/graph/fluent/__init__.py
@@ -14,7 +14,6 @@
 import sys
 import datetime
-import six
 import time
 from collections import namedtuple
 from packaging.version import Version
@@ -457,10 +456,10 @@ def _test_udt_with_namedtuples(self, schema, graphson):
 def _write_and_read_data_types(self, schema, graphson, use_schema=True):
 g = self.fetch_traversal_source(graphson)
 ep = self.get_execution_profile(graphson)
- for data in six.itervalues(schema.fixtures.datatypes()):
+ for data in schema.fixtures.datatypes().values():
 typ, value, deserializer = data
 vertex_label = VertexLabel([typ])
- property_name = next(six.iterkeys(vertex_label.non_pk_properties))
+ property_name = next(iter(vertex_label.non_pk_properties.keys()))
 if use_schema or schema is CoreGraphSchema:
 schema.create_vertex_label(self.session, vertex_label, execution_profile=ep)
@@ -536,9 +535,9 @@ def __test_udt(self, schema, graphson, address_class, address_with_tags_class,
 }
 g = self.fetch_traversal_source(graphson)
- for typ, value in six.itervalues(data):
+ for typ, value in data.values():
 vertex_label = VertexLabel([typ])
- property_name = next(six.iterkeys(vertex_label.non_pk_properties))
+ property_name = next(iter(vertex_label.non_pk_properties.keys()))
 schema.create_vertex_label(self.session, vertex_label, execution_profile=ep)
 write_traversal = g.addV(str(vertex_label.label)).property('pkid', vertex_label.id). \
@@ -597,7 +596,7 @@ def _validate_prop(key, value, unittest):
 elif any(key.startswith(t) for t in ('Linestring',)):
 typ = LineString
 elif any(key.startswith(t) for t in ('neg',)):
- typ = six.string_types
+ typ = str
 elif any(key.startswith(t) for t in ('date',)):
 typ = datetime.date
 elif any(key.startswith(t) for t in ('time',)):
diff --git a/tests/integration/advanced/graph/fluent/test_graph.py b/tests/integration/advanced/graph/fluent/test_graph.py
index d46a74a146..190292e6fe 100644
--- a/tests/integration/advanced/graph/fluent/test_graph.py
+++ b/tests/integration/advanced/graph/fluent/test_graph.py
@@ -12,8 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
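The fluent-test hunks above read a single property name out of a dict. A Python 3 detail that matters here: dict views are iterable but are not iterators, so next() needs an explicit iter(). A sketch (the dict contents are illustrative):

    props = {'pkid': 0, 'text1': 'Text()'}
    next(iter(props.keys()))   # 'pkid'
    # next(props.keys())       # TypeError: 'dict_keys' object is not an iterator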
-import six
-
 from cassandra import cluster
 from cassandra.cluster import ContinuousPagingOptions
 from cassandra.datastax.graph.fluent import DseGraph
@@ -120,10 +118,10 @@ def _send_batch_and_read_results(self, schema, graphson, add_all=False, use_sche
 ep = self.get_execution_profile(graphson)
 batch = DseGraph.batch(session=self.session,
 execution_profile=self.get_execution_profile(graphson, traversal=True))
- for data in six.itervalues(datatypes):
+ for data in datatypes.values():
 typ, value, deserializer = data
 vertex_label = VertexLabel([typ])
- property_name = next(six.iterkeys(vertex_label.non_pk_properties))
+ property_name = next(iter(vertex_label.non_pk_properties.keys()))
 values[property_name] = value
 if use_schema or schema is CoreGraphSchema:
 schema.create_vertex_label(self.session, vertex_label, execution_profile=ep)
diff --git a/tests/integration/advanced/graph/test_graph.py b/tests/integration/advanced/graph/test_graph.py
index 277283ea5a..7f55229911 100644
--- a/tests/integration/advanced/graph/test_graph.py
+++ b/tests/integration/advanced/graph/test_graph.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import six
 import re
 from cassandra import OperationTimedOut, InvalidRequest
diff --git a/tests/integration/advanced/graph/test_graph_datatype.py b/tests/integration/advanced/graph/test_graph_datatype.py
index 0445ce8030..1159527a32 100644
--- a/tests/integration/advanced/graph/test_graph_datatype.py
+++ b/tests/integration/advanced/graph/test_graph_datatype.py
@@ -15,7 +15,6 @@
 import unittest
 import time
-import six
 import logging
 from packaging.version import Version
 from collections import namedtuple
@@ -67,13 +66,13 @@ def _validate_type(self, vertex):
 if any(type_indicator.startswith(t)
 for t in ('int', 'short', 'long', 'bigint', 'decimal', 'smallint', 'varint')):
- typ = six.integer_types
+ typ = int
 elif any(type_indicator.startswith(t) for t in ('float', 'double')):
 typ = float
 elif any(type_indicator.startswith(t) for t in ('duration', 'date', 'negdate', 'time',
 'blob', 'timestamp', 'point', 'linestring',
 'polygon', 'inet', 'uuid')):
- typ = six.text_type
+ typ = str
 else:
 self.fail("Received unexpected type: %s" % type_indicator)
@@ -85,10 +84,10 @@ class GenericGraphDataTypeTest(GraphUnitTestCase):
 def _test_all_datatypes(self, schema, graphson):
 ep = self.get_execution_profile(graphson)
- for data in six.itervalues(schema.fixtures.datatypes()):
+ for data in schema.fixtures.datatypes().values():
 typ, value, deserializer = data
 vertex_label = VertexLabel([typ])
- property_name = next(six.iterkeys(vertex_label.non_pk_properties))
+ property_name = next(iter(vertex_label.non_pk_properties.keys()))
 schema.create_vertex_label(self.session, vertex_label, execution_profile=ep)
 vertex = list(schema.add_vertex(self.session, vertex_label, property_name, value, execution_profile=ep))[0]
@@ -167,9 +166,9 @@ def __test_udt(self, schema, graphson, address_class, address_with_tags_class,
 ), 'hello')]
 }
- for typ, value in six.itervalues(data):
+ for typ, value in data.values():
 vertex_label = VertexLabel([typ])
- property_name = next(six.iterkeys(vertex_label.non_pk_properties))
+ property_name = next(iter(vertex_label.non_pk_properties.keys()))
 schema.create_vertex_label(self.session, vertex_label, execution_profile=ep)
 vertex = list(schema.add_vertex(self.session, vertex_label, property_name, value, execution_profile=ep))[0]
diff --git a/tests/integration/advanced/graph/test_graph_query.py
b/tests/integration/advanced/graph/test_graph_query.py
index 9bc23e611a..fe65f616a3 100644
--- a/tests/integration/advanced/graph/test_graph_query.py
+++ b/tests/integration/advanced/graph/test_graph_query.py
@@ -14,7 +14,6 @@
 import sys
-import six
 from packaging.version import Version
 from copy import copy
@@ -83,7 +82,7 @@ def test_consistency_passing(self):
 res = s.execute_graph("null")
 for k, v in cl.items():
- self.assertEqual(res.response_future.message.custom_payload[graph_params[k]], six.b(ConsistencyLevel.value_to_name[v]))
+ self.assertEqual(res.response_future.message.custom_payload[graph_params[k]], ConsistencyLevel.value_to_name[v].encode())
 # passed profile values override session defaults
 cl = {0: ConsistencyLevel.ALL, 1: ConsistencyLevel.QUORUM}
@@ -97,7 +96,7 @@ def test_consistency_passing(self):
 res = s.execute_graph("null", execution_profile=tmp_profile)
 for k, v in cl.items():
- self.assertEqual(res.response_future.message.custom_payload[graph_params[k]], six.b(ConsistencyLevel.value_to_name[v]))
+ self.assertEqual(res.response_future.message.custom_payload[graph_params[k]], ConsistencyLevel.value_to_name[v].encode())
 finally:
 default_profile.graph_options = default_graph_opts
@@ -588,7 +587,7 @@ def _test_basic_query_with_type_wrapper(self, schema, graphson):
 vl = VertexLabel(['tupleOf(Int, Bigint)'])
 schema.create_vertex_label(self.session, vl, execution_profile=ep)
- prop_name = next(six.iterkeys(vl.non_pk_properties))
+ prop_name = next(iter(vl.non_pk_properties.keys()))
 with self.assertRaises(InvalidRequest):
 schema.add_vertex(self.session, vl, prop_name, (1, 42), execution_profile=ep)
diff --git a/tests/integration/advanced/test_cont_paging.py b/tests/integration/advanced/test_cont_paging.py
index 2e75d7061d..99de82647d 100644
--- a/tests/integration/advanced/test_cont_paging.py
+++ b/tests/integration/advanced/test_cont_paging.py
@@ -21,7 +21,6 @@
 import unittest
 from itertools import cycle, count
-from six.moves import range
 from packaging.version import Version
 import time
diff --git a/tests/integration/cloud/test_cloud.py b/tests/integration/cloud/test_cloud.py
index 13c43d18ea..80fd6cf863 100644
--- a/tests/integration/cloud/test_cloud.py
+++ b/tests/integration/cloud/test_cloud.py
@@ -20,7 +20,6 @@
 import unittest
-import six
 from ssl import SSLContext, PROTOCOL_TLS
 from cassandra import DriverException, ConsistencyLevel, InvalidRequest
@@ -114,10 +113,7 @@ def test_error_when_bundle_doesnt_exist(self):
 try:
 self.connect('/invalid/path/file.zip')
 except Exception as e:
- if six.PY2:
- self.assertIsInstance(e, IOError)
- else:
- self.assertIsInstance(e, FileNotFoundError)
+ self.assertIsInstance(e, FileNotFoundError)
 def test_load_balancing_policy_is_dcawaretokenlbp(self):
 self.connect(self.creds)
@@ -163,7 +159,7 @@ def test_default_consistency(self):
 self.assertEqual(self.session.default_consistency_level, ConsistencyLevel.LOCAL_QUORUM)
 # Verify EXEC_PROFILE_DEFAULT, EXEC_PROFILE_GRAPH_DEFAULT,
 # EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT, EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT
- for ep_key in six.iterkeys(self.cluster.profile_manager.profiles):
+ for ep_key in self.cluster.profile_manager.profiles.keys():
 ep = self.cluster.profile_manager.profiles[ep_key]
 self.assertEqual(
 ep.consistency_level,
diff --git a/tests/integration/cqlengine/columns/test_container_columns.py b/tests/integration/cqlengine/columns/test_container_columns.py
index 2acf36457b..1f51770eac 100644
--- a/tests/integration/cqlengine/columns/test_container_columns.py
+++
b/tests/integration/cqlengine/columns/test_container_columns.py @@ -15,7 +15,6 @@ from datetime import datetime, timedelta import json import logging -import six import sys import traceback from uuid import uuid4 @@ -48,7 +47,7 @@ class JsonTestColumn(columns.Column): def to_python(self, value): if value is None: return - if isinstance(value, six.string_types): + if isinstance(value, str): return json.loads(value) else: return value diff --git a/tests/integration/cqlengine/columns/test_value_io.py b/tests/integration/cqlengine/columns/test_value_io.py index 2c82fe16f7..758ca714a6 100644 --- a/tests/integration/cqlengine/columns/test_value_io.py +++ b/tests/integration/cqlengine/columns/test_value_io.py @@ -16,7 +16,6 @@ from datetime import datetime, timedelta, time from decimal import Decimal from uuid import uuid1, uuid4, UUID -import six from cassandra.cqlengine import columns from cassandra.cqlengine.management import sync_table @@ -101,15 +100,15 @@ def test_column_io(self): class TestBlobIO(BaseColumnIOTest): column = columns.Blob - pkey_val = six.b('blake'), uuid4().bytes - data_val = six.b('eggleston'), uuid4().bytes + pkey_val = b'blake', uuid4().bytes + data_val = b'eggleston', uuid4().bytes class TestBlobIO2(BaseColumnIOTest): column = columns.Blob - pkey_val = bytearray(six.b('blake')), uuid4().bytes - data_val = bytearray(six.b('eggleston')), uuid4().bytes + pkey_val = bytearray(b'blake'), uuid4().bytes + data_val = bytearray(b'eggleston'), uuid4().bytes class TestTextIO(BaseColumnIOTest): diff --git a/tests/integration/cqlengine/management/test_compaction_settings.py b/tests/integration/cqlengine/management/test_compaction_settings.py index 63161643f8..e7d280a24b 100644 --- a/tests/integration/cqlengine/management/test_compaction_settings.py +++ b/tests/integration/cqlengine/management/test_compaction_settings.py @@ -14,7 +14,6 @@ import copy from mock import patch -import six from cassandra.cqlengine import columns from cassandra.cqlengine.management import drop_table, sync_table, _get_table_metadata, _update_options @@ -110,7 +109,7 @@ def _verify_options(self, table_meta, expected_options): cql = table_meta.export_as_string() for name, value in expected_options.items(): - if isinstance(value, six.string_types): + if isinstance(value, str): self.assertIn("%s = '%s'" % (name, value), cql) else: start = cql.find("%s = {" % (name,)) diff --git a/tests/integration/cqlengine/management/test_management.py b/tests/integration/cqlengine/management/test_management.py index a758a89f0a..edff6373c3 100644 --- a/tests/integration/cqlengine/management/test_management.py +++ b/tests/integration/cqlengine/management/test_management.py @@ -13,7 +13,6 @@ # limitations under the License. 
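The column tests above lean on the text/bytes split that six.string_types used to paper over. A short sketch of the Python 3 checks involved (values are illustrative):

    isinstance("café", str)                               # True: all py3 strings are text
    isinstance(b"blake", str)                             # False: bytes are not strings
    isinstance(bytearray(b"blake"), (bytes, bytearray))   # True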
import unittest -import six import mock import logging from packaging.version import Version diff --git a/tests/integration/cqlengine/model/test_class_construction.py b/tests/integration/cqlengine/model/test_class_construction.py index f764e78e5c..dae97c4438 100644 --- a/tests/integration/cqlengine/model/test_class_construction.py +++ b/tests/integration/cqlengine/model/test_class_construction.py @@ -15,7 +15,6 @@ from uuid import uuid4 import warnings -import six from cassandra.cqlengine import columns, CQLEngineException from cassandra.cqlengine.models import Model, ModelException, ModelDefinitionException, ColumnQueryEvaluator from cassandra.cqlengine.query import ModelQuerySet, DMLQuery diff --git a/tests/integration/cqlengine/operators/test_where_operators.py b/tests/integration/cqlengine/operators/test_where_operators.py index 555af11025..1e0134dbac 100644 --- a/tests/integration/cqlengine/operators/test_where_operators.py +++ b/tests/integration/cqlengine/operators/test_where_operators.py @@ -27,8 +27,6 @@ from tests.integration.cqlengine.operators import check_lookup from tests.integration import greaterthanorequalcass30 -import six - class TestWhereOperators(unittest.TestCase): @@ -47,15 +45,15 @@ def test_symbol_lookup(self): def test_operator_rendering(self): """ tests symbols are rendered properly """ - self.assertEqual("=", six.text_type(EqualsOperator())) - self.assertEqual("!=", six.text_type(NotEqualsOperator())) - self.assertEqual("IN", six.text_type(InOperator())) - self.assertEqual(">", six.text_type(GreaterThanOperator())) - self.assertEqual(">=", six.text_type(GreaterThanOrEqualOperator())) - self.assertEqual("<", six.text_type(LessThanOperator())) - self.assertEqual("<=", six.text_type(LessThanOrEqualOperator())) - self.assertEqual("CONTAINS", six.text_type(ContainsOperator())) - self.assertEqual("LIKE", six.text_type(LikeOperator())) + self.assertEqual("=", str(EqualsOperator())) + self.assertEqual("!=", str(NotEqualsOperator())) + self.assertEqual("IN", str(InOperator())) + self.assertEqual(">", str(GreaterThanOperator())) + self.assertEqual(">=", str(GreaterThanOrEqualOperator())) + self.assertEqual("<", str(LessThanOperator())) + self.assertEqual("<=", str(LessThanOrEqualOperator())) + self.assertEqual("CONTAINS", str(ContainsOperator())) + self.assertEqual("LIKE", str(LikeOperator())) class TestIsNotNull(BaseCassEngTestCase): diff --git a/tests/integration/cqlengine/statements/test_base_statement.py b/tests/integration/cqlengine/statements/test_base_statement.py index 25ed0c9cb4..0c95504b13 100644 --- a/tests/integration/cqlengine/statements/test_base_statement.py +++ b/tests/integration/cqlengine/statements/test_base_statement.py @@ -14,7 +14,6 @@ import unittest from uuid import uuid4 -import six from cassandra.query import FETCH_SIZE_UNSET from cassandra.cqlengine.statements import BaseCQLStatement @@ -128,7 +127,7 @@ def test_like_operator(self): ss = SelectStatement(self.table_name) like_clause = "text_for_%" ss.add_where(Column(db_field='text'), LikeOperator(), like_clause) - self.assertEqual(six.text_type(ss), + self.assertEqual(str(ss), 'SELECT * FROM {} WHERE "text" LIKE %(0)s'.format(self.table_name)) result = execute(ss) diff --git a/tests/integration/cqlengine/statements/test_delete_statement.py b/tests/integration/cqlengine/statements/test_delete_statement.py index 5e2894a06b..745881f42f 100644 --- a/tests/integration/cqlengine/statements/test_delete_statement.py +++ b/tests/integration/cqlengine/statements/test_delete_statement.py @@ -17,7 +17,6 @@ 
from cassandra.cqlengine.columns import Column from cassandra.cqlengine.statements import DeleteStatement, WhereClause, MapDeleteClause, ConditionalClause from cassandra.cqlengine.operators import * -import six class DeleteStatementTests(TestCase): @@ -31,24 +30,24 @@ def test_single_field_is_listified(self): def test_field_rendering(self): """ tests that fields are properly added to the select statement """ ds = DeleteStatement('table', ['f1', 'f2']) - self.assertTrue(six.text_type(ds).startswith('DELETE "f1", "f2"'), six.text_type(ds)) + self.assertTrue(str(ds).startswith('DELETE "f1", "f2"'), str(ds)) self.assertTrue(str(ds).startswith('DELETE "f1", "f2"'), str(ds)) def test_none_fields_rendering(self): """ tests that a '*' is added if no fields are passed in """ ds = DeleteStatement('table', None) - self.assertTrue(six.text_type(ds).startswith('DELETE FROM'), six.text_type(ds)) + self.assertTrue(str(ds).startswith('DELETE FROM'), str(ds)) self.assertTrue(str(ds).startswith('DELETE FROM'), str(ds)) def test_table_rendering(self): ds = DeleteStatement('table', None) - self.assertTrue(six.text_type(ds).startswith('DELETE FROM table'), six.text_type(ds)) + self.assertTrue(str(ds).startswith('DELETE FROM table'), str(ds)) self.assertTrue(str(ds).startswith('DELETE FROM table'), str(ds)) def test_where_clause_rendering(self): ds = DeleteStatement('table', None) ds.add_where(Column(db_field='a'), EqualsOperator(), 'b') - self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "a" = %(0)s', six.text_type(ds)) + self.assertEqual(str(ds), 'DELETE FROM table WHERE "a" = %(0)s', str(ds)) def test_context_update(self): ds = DeleteStatement('table', None) @@ -56,7 +55,7 @@ def test_context_update(self): ds.add_where(Column(db_field='a'), EqualsOperator(), 'b') ds.update_context_id(7) - self.assertEqual(six.text_type(ds), 'DELETE "d"[%(8)s] FROM table WHERE "a" = %(7)s') + self.assertEqual(str(ds), 'DELETE "d"[%(8)s] FROM table WHERE "a" = %(7)s') self.assertEqual(ds.get_context(), {'7': 'b', '8': 3}) def test_context(self): @@ -69,23 +68,23 @@ def test_range_deletion_rendering(self): ds.add_where(Column(db_field='a'), EqualsOperator(), 'b') ds.add_where(Column(db_field='created_at'), GreaterThanOrEqualOperator(), '0') ds.add_where(Column(db_field='created_at'), LessThanOrEqualOperator(), '10') - self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "a" = %(0)s AND "created_at" >= %(1)s AND "created_at" <= %(2)s', six.text_type(ds)) + self.assertEqual(str(ds), 'DELETE FROM table WHERE "a" = %(0)s AND "created_at" >= %(1)s AND "created_at" <= %(2)s', str(ds)) ds = DeleteStatement('table', None) ds.add_where(Column(db_field='a'), EqualsOperator(), 'b') ds.add_where(Column(db_field='created_at'), InOperator(), ['0', '10', '20']) - self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "a" = %(0)s AND "created_at" IN %(1)s', six.text_type(ds)) + self.assertEqual(str(ds), 'DELETE FROM table WHERE "a" = %(0)s AND "created_at" IN %(1)s', str(ds)) ds = DeleteStatement('table', None) ds.add_where(Column(db_field='a'), NotEqualsOperator(), 'b') - self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "a" != %(0)s', six.text_type(ds)) + self.assertEqual(str(ds), 'DELETE FROM table WHERE "a" != %(0)s', str(ds)) def test_delete_conditional(self): where = [WhereClause('id', EqualsOperator(), 1)] conditionals = [ConditionalClause('f0', 'value0'), ConditionalClause('f1', 'value1')] ds = DeleteStatement('table', where=where, conditionals=conditionals) self.assertEqual(len(ds.conditionals), 
len(conditionals)) - self.assertEqual(six.text_type(ds), 'DELETE FROM table WHERE "id" = %(0)s IF "f0" = %(1)s AND "f1" = %(2)s', six.text_type(ds)) + self.assertEqual(str(ds), 'DELETE FROM table WHERE "id" = %(0)s IF "f0" = %(1)s AND "f1" = %(2)s', str(ds)) fields = ['one', 'two'] ds = DeleteStatement('table', fields=fields, where=where, conditionals=conditionals) - self.assertEqual(six.text_type(ds), 'DELETE "one", "two" FROM table WHERE "id" = %(0)s IF "f0" = %(1)s AND "f1" = %(2)s', six.text_type(ds)) + self.assertEqual(str(ds), 'DELETE "one", "two" FROM table WHERE "id" = %(0)s IF "f0" = %(1)s AND "f1" = %(2)s', str(ds)) diff --git a/tests/integration/cqlengine/statements/test_insert_statement.py b/tests/integration/cqlengine/statements/test_insert_statement.py index a1dcd08968..45485af912 100644 --- a/tests/integration/cqlengine/statements/test_insert_statement.py +++ b/tests/integration/cqlengine/statements/test_insert_statement.py @@ -13,8 +13,6 @@ # limitations under the License. import unittest -import six - from cassandra.cqlengine.columns import Column from cassandra.cqlengine.statements import InsertStatement @@ -27,7 +25,7 @@ def test_statement(self): ist.add_assignment(Column(db_field='c'), 'd') self.assertEqual( - six.text_type(ist), + str(ist), 'INSERT INTO table ("a", "c") VALUES (%(0)s, %(1)s)' ) @@ -38,7 +36,7 @@ def test_context_update(self): ist.update_context_id(4) self.assertEqual( - six.text_type(ist), + str(ist), 'INSERT INTO table ("a", "c") VALUES (%(4)s, %(5)s)' ) ctx = ist.get_context() @@ -48,4 +46,4 @@ def test_additional_rendering(self): ist = InsertStatement('table', ttl=60) ist.add_assignment(Column(db_field='a'), 'b') ist.add_assignment(Column(db_field='c'), 'd') - self.assertIn('USING TTL 60', six.text_type(ist)) + self.assertIn('USING TTL 60', str(ist)) diff --git a/tests/integration/cqlengine/statements/test_select_statement.py b/tests/integration/cqlengine/statements/test_select_statement.py index c6d1ac69f4..26c9c804cb 100644 --- a/tests/integration/cqlengine/statements/test_select_statement.py +++ b/tests/integration/cqlengine/statements/test_select_statement.py @@ -16,7 +16,6 @@ from cassandra.cqlengine.columns import Column from cassandra.cqlengine.statements import SelectStatement, WhereClause from cassandra.cqlengine.operators import * -import six class SelectStatementTests(unittest.TestCase): @@ -28,42 +27,42 @@ def test_single_field_is_listified(self): def test_field_rendering(self): """ tests that fields are properly added to the select statement """ ss = SelectStatement('table', ['f1', 'f2']) - self.assertTrue(six.text_type(ss).startswith('SELECT "f1", "f2"'), six.text_type(ss)) + self.assertTrue(str(ss).startswith('SELECT "f1", "f2"'), str(ss)) self.assertTrue(str(ss).startswith('SELECT "f1", "f2"'), str(ss)) def test_none_fields_rendering(self): """ tests that a '*' is added if no fields are passed in """ ss = SelectStatement('table') - self.assertTrue(six.text_type(ss).startswith('SELECT *'), six.text_type(ss)) + self.assertTrue(str(ss).startswith('SELECT *'), str(ss)) self.assertTrue(str(ss).startswith('SELECT *'), str(ss)) def test_table_rendering(self): ss = SelectStatement('table') - self.assertTrue(six.text_type(ss).startswith('SELECT * FROM table'), six.text_type(ss)) + self.assertTrue(str(ss).startswith('SELECT * FROM table'), str(ss)) self.assertTrue(str(ss).startswith('SELECT * FROM table'), str(ss)) def test_where_clause_rendering(self): ss = SelectStatement('table') ss.add_where(Column(db_field='a'), EqualsOperator(), 'b') - 
self.assertEqual(six.text_type(ss), 'SELECT * FROM table WHERE "a" = %(0)s', six.text_type(ss)) + self.assertEqual(str(ss), 'SELECT * FROM table WHERE "a" = %(0)s', str(ss)) def test_count(self): ss = SelectStatement('table', count=True, limit=10, order_by='d') ss.add_where(Column(db_field='a'), EqualsOperator(), 'b') - self.assertEqual(six.text_type(ss), 'SELECT COUNT(*) FROM table WHERE "a" = %(0)s LIMIT 10', six.text_type(ss)) - self.assertIn('LIMIT', six.text_type(ss)) - self.assertNotIn('ORDER', six.text_type(ss)) + self.assertEqual(str(ss), 'SELECT COUNT(*) FROM table WHERE "a" = %(0)s LIMIT 10', str(ss)) + self.assertIn('LIMIT', str(ss)) + self.assertNotIn('ORDER', str(ss)) def test_distinct(self): ss = SelectStatement('table', distinct_fields=['field2']) ss.add_where(Column(db_field='field1'), EqualsOperator(), 'b') - self.assertEqual(six.text_type(ss), 'SELECT DISTINCT "field2" FROM table WHERE "field1" = %(0)s', six.text_type(ss)) + self.assertEqual(str(ss), 'SELECT DISTINCT "field2" FROM table WHERE "field1" = %(0)s', str(ss)) ss = SelectStatement('table', distinct_fields=['field1', 'field2']) - self.assertEqual(six.text_type(ss), 'SELECT DISTINCT "field1", "field2" FROM table') + self.assertEqual(str(ss), 'SELECT DISTINCT "field1", "field2" FROM table') ss = SelectStatement('table', distinct_fields=['field1'], count=True) - self.assertEqual(six.text_type(ss), 'SELECT DISTINCT COUNT("field1") FROM table') + self.assertEqual(str(ss), 'SELECT DISTINCT COUNT("field1") FROM table') def test_context(self): ss = SelectStatement('table') @@ -89,20 +88,20 @@ def test_additional_rendering(self): limit=15, allow_filtering=True ) - qstr = six.text_type(ss) + qstr = str(ss) self.assertIn('LIMIT 15', qstr) self.assertIn('ORDER BY x, y', qstr) self.assertIn('ALLOW FILTERING', qstr) def test_limit_rendering(self): ss = SelectStatement('table', None, limit=10) - qstr = six.text_type(ss) + qstr = str(ss) self.assertIn('LIMIT 10', qstr) ss = SelectStatement('table', None, limit=0) - qstr = six.text_type(ss) + qstr = str(ss) self.assertNotIn('LIMIT', qstr) ss = SelectStatement('table', None, limit=None) - qstr = six.text_type(ss) + qstr = str(ss) self.assertNotIn('LIMIT', qstr) diff --git a/tests/integration/cqlengine/statements/test_update_statement.py b/tests/integration/cqlengine/statements/test_update_statement.py index 99105069dd..4429625bf4 100644 --- a/tests/integration/cqlengine/statements/test_update_statement.py +++ b/tests/integration/cqlengine/statements/test_update_statement.py @@ -18,7 +18,6 @@ from cassandra.cqlengine.statements import (UpdateStatement, WhereClause, AssignmentClause, SetUpdateClause, ListUpdateClause) -import six class UpdateStatementTests(unittest.TestCase): @@ -26,7 +25,7 @@ class UpdateStatementTests(unittest.TestCase): def test_table_rendering(self): """ tests that fields are properly added to the select statement """ us = UpdateStatement('table') - self.assertTrue(six.text_type(us).startswith('UPDATE table SET'), six.text_type(us)) + self.assertTrue(str(us).startswith('UPDATE table SET'), str(us)) self.assertTrue(str(us).startswith('UPDATE table SET'), str(us)) def test_rendering(self): @@ -34,10 +33,10 @@ def test_rendering(self): us.add_assignment(Column(db_field='a'), 'b') us.add_assignment(Column(db_field='c'), 'd') us.add_where(Column(db_field='a'), EqualsOperator(), 'x') - self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s', six.text_type(us)) + self.assertEqual(str(us), 'UPDATE table SET "a" = %(0)s, "c" = 
%(1)s WHERE "a" = %(2)s', str(us)) us.add_where(Column(db_field='a'), NotEqualsOperator(), 'y') - self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s AND "a" != %(3)s', six.text_type(us)) + self.assertEqual(str(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s AND "a" != %(3)s', str(us)) def test_context(self): us = UpdateStatement('table') @@ -52,19 +51,19 @@ def test_context_update(self): us.add_assignment(Column(db_field='c'), 'd') us.add_where(Column(db_field='a'), EqualsOperator(), 'x') us.update_context_id(3) - self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(4)s, "c" = %(5)s WHERE "a" = %(3)s') + self.assertEqual(str(us), 'UPDATE table SET "a" = %(4)s, "c" = %(5)s WHERE "a" = %(3)s') self.assertEqual(us.get_context(), {'4': 'b', '5': 'd', '3': 'x'}) def test_additional_rendering(self): us = UpdateStatement('table', ttl=60) us.add_assignment(Column(db_field='a'), 'b') us.add_where(Column(db_field='a'), EqualsOperator(), 'x') - self.assertIn('USING TTL 60', six.text_type(us)) + self.assertIn('USING TTL 60', str(us)) def test_update_set_add(self): us = UpdateStatement('table') us.add_update(Set(Text, db_field='a'), set((1,)), 'add') - self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s') + self.assertEqual(str(us), 'UPDATE table SET "a" = "a" + %(0)s') def test_update_empty_set_add_does_not_assign(self): us = UpdateStatement('table') diff --git a/tests/integration/cqlengine/statements/test_where_clause.py b/tests/integration/cqlengine/statements/test_where_clause.py index 21671be086..0090fa0123 100644 --- a/tests/integration/cqlengine/statements/test_where_clause.py +++ b/tests/integration/cqlengine/statements/test_where_clause.py @@ -13,7 +13,6 @@ # limitations under the License. import unittest -import six from cassandra.cqlengine.operators import EqualsOperator from cassandra.cqlengine.statements import StatementException, WhereClause @@ -30,7 +29,7 @@ def test_where_clause_rendering(self): wc = WhereClause('a', EqualsOperator(), 'c') wc.set_context_id(5) - self.assertEqual('"a" = %(5)s', six.text_type(wc), six.text_type(wc)) + self.assertEqual('"a" = %(5)s', str(wc), str(wc)) self.assertEqual('"a" = %(5)s', str(wc), type(wc)) def test_equality_method(self): diff --git a/tests/integration/cqlengine/test_batch_query.py b/tests/integration/cqlengine/test_batch_query.py index 405326b5bc..cd440df291 100644 --- a/tests/integration/cqlengine/test_batch_query.py +++ b/tests/integration/cqlengine/test_batch_query.py @@ -13,9 +13,6 @@ # limitations under the License. 
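Note: all of the cqlengine statement assertions in the hunks above make the same substitution: six.text_type(obj) becomes str(obj). On Python 3 the two are interchangeable, because str is the unicode text type that six.text_type aliased. A minimal sketch of the equivalence, assuming the cassandra-driver package is importable (values taken from the tests above):

    from cassandra.cqlengine.statements import SelectStatement

    ss = SelectStatement('table', ['f1', 'f2'])
    # On Python 3, six.text_type is simply an alias for str, so the old
    # six.text_type(ss) and the new str(ss) render the statement identically.
    assert str(ss).startswith('SELECT "f1", "f2"')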
import warnings -import six -import sure - from cassandra.cqlengine import columns from cassandra.cqlengine.management import drop_table, sync_table from cassandra.cqlengine.models import Model diff --git a/tests/integration/cqlengine/test_lwt_conditional.py b/tests/integration/cqlengine/test_lwt_conditional.py index f8459a95ad..45dbf86c68 100644 --- a/tests/integration/cqlengine/test_lwt_conditional.py +++ b/tests/integration/cqlengine/test_lwt_conditional.py @@ -14,7 +14,6 @@ import unittest import mock -import six from uuid import uuid4 from cassandra.cqlengine import columns @@ -113,7 +112,7 @@ def test_conditional_clause(self): tc = ConditionalClause('some_value', 23) tc.set_context_id(3) - self.assertEqual('"some_value" = %(3)s', six.text_type(tc)) + self.assertEqual('"some_value" = %(3)s', str(tc)) self.assertEqual('"some_value" = %(3)s', str(tc)) def test_batch_update_conditional(self): diff --git a/tests/integration/datatype_utils.py b/tests/integration/datatype_utils.py index 8a1c813baa..1f7fb50a05 100644 --- a/tests/integration/datatype_utils.py +++ b/tests/integration/datatype_utils.py @@ -14,8 +14,8 @@ from decimal import Decimal from datetime import datetime, date, time +import ipaddress from uuid import uuid1, uuid4 -import six from cassandra.util import OrderedMap, Date, Time, sortedset, Duration @@ -91,11 +91,10 @@ def get_sample_data(): sample_data[datatype] = 3.4028234663852886e+38 elif datatype == 'inet': - sample_data[datatype] = ('123.123.123.123', '2001:db8:85a3:8d3:1319:8a2e:370:7348') - if six.PY3: - import ipaddress - sample_data[datatype] += (ipaddress.IPv4Address("123.123.123.123"), - ipaddress.IPv6Address('2001:db8:85a3:8d3:1319:8a2e:370:7348')) + sample_data[datatype] = ('123.123.123.123', + '2001:db8:85a3:8d3:1319:8a2e:370:7348', + ipaddress.IPv4Address("123.123.123.123"), + ipaddress.IPv6Address('2001:db8:85a3:8d3:1319:8a2e:370:7348')) elif datatype == 'int': sample_data[datatype] = 2147483647 diff --git a/tests/integration/long/test_ipv6.py b/tests/integration/long/test_ipv6.py index 3e2f2ffc5e..4a741b70b3 100644 --- a/tests/integration/long/test_ipv6.py +++ b/tests/integration/long/test_ipv6.py @@ -13,7 +13,6 @@ # limitations under the License. 
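Note: the datatype_utils hunk above can drop the six.PY3 guard because the ipaddress module is part of the standard library on every supported runtime, so the inet samples always include the address objects. A standard-library-only sketch of the resulting sample tuple:

    import ipaddress

    # With the six.PY3 branch gone, the inet samples unconditionally mix
    # plain strings with ipaddress objects (values from the hunk above).
    inet_samples = ('123.123.123.123',
                    '2001:db8:85a3:8d3:1319:8a2e:370:7348',
                    ipaddress.IPv4Address('123.123.123.123'),
                    ipaddress.IPv6Address('2001:db8:85a3:8d3:1319:8a2e:370:7348'))
    assert str(inet_samples[2]) == inet_samples[0]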
import os, socket, errno -import six from ccmlib import common from cassandra.cluster import NoHostAvailable diff --git a/tests/integration/simulacron/test_connection.py b/tests/integration/simulacron/test_connection.py index 1def601d2e..d08676659f 100644 --- a/tests/integration/simulacron/test_connection.py +++ b/tests/integration/simulacron/test_connection.py @@ -14,7 +14,6 @@ import unittest import logging -import six import time from mock import Mock, patch diff --git a/tests/integration/simulacron/utils.py b/tests/integration/simulacron/utils.py index ba9573fd23..01d94fc539 100644 --- a/tests/integration/simulacron/utils.py +++ b/tests/integration/simulacron/utils.py @@ -15,7 +15,7 @@ import json import subprocess import time -from six.moves.urllib.request import build_opener, Request, HTTPHandler +from urllib.request import build_opener, Request, HTTPHandler from cassandra.metadata import SchemaParserV4, SchemaParserDSE68 diff --git a/tests/integration/standard/test_authentication.py b/tests/integration/standard/test_authentication.py index 2f8ffbb068..94f77a6916 100644 --- a/tests/integration/standard/test_authentication.py +++ b/tests/integration/standard/test_authentication.py @@ -16,7 +16,6 @@ import logging import time -import six from cassandra.cluster import NoHostAvailable from cassandra.auth import PlainTextAuthProvider, SASLClient, SaslAuthProvider diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py index 6d5e040e32..194d0aa18f 100644 --- a/tests/integration/standard/test_client_warnings.py +++ b/tests/integration/standard/test_client_warnings.py @@ -15,7 +15,6 @@ import unittest -import six from cassandra.query import BatchStatement from tests.integration import (use_singledc, PROTOCOL_VERSION, local, TestCluster, diff --git a/tests/integration/standard/test_concurrent.py b/tests/integration/standard/test_concurrent.py index 15da526bde..ba891b4bd0 100644 --- a/tests/integration/standard/test_concurrent.py +++ b/tests/integration/standard/test_concurrent.py @@ -24,8 +24,6 @@ from tests.integration import use_singledc, PROTOCOL_VERSION, TestCluster -from six import next - import unittest log = logging.getLogger(__name__) diff --git a/tests/integration/standard/test_connection.py b/tests/integration/standard/test_connection.py index 0220ffbb1a..a1b05c3d6f 100644 --- a/tests/integration/standard/test_connection.py +++ b/tests/integration/standard/test_connection.py @@ -17,7 +17,6 @@ from functools import partial from mock import patch import logging -from six.moves import range import sys import threading from threading import Thread, Event diff --git a/tests/integration/standard/test_custom_payload.py b/tests/integration/standard/test_custom_payload.py index fd0a94c419..1ca580fb3e 100644 --- a/tests/integration/standard/test_custom_payload.py +++ b/tests/integration/standard/test_custom_payload.py @@ -15,8 +15,6 @@ import unittest -import six - from cassandra.query import (SimpleStatement, BatchStatement, BatchType) from tests.integration import (use_singledc, PROTOCOL_VERSION, local, TestCluster, @@ -140,16 +138,16 @@ def validate_various_custom_payloads(self, statement): # Long key value pair key_value = "x" * 10 - custom_payload = {key_value: six.b(key_value)} + custom_payload = {key_value: key_value.encode()} self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload) # Max supported value key pairs according C* binary protocol v4 should be 65534 (unsigned short max value) 
for i in range(65534): - custom_payload[str(i)] = six.b('x') + custom_payload[str(i)] = b'x' self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload) # Add one custom payload to this is too many key value pairs and should fail - custom_payload[str(65535)] = six.b('x') + custom_payload[str(65535)] = b'x' with self.assertRaises(ValueError): self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload) diff --git a/tests/integration/standard/test_custom_protocol_handler.py b/tests/integration/standard/test_custom_protocol_handler.py index 3ec94b05d8..9f3a52e256 100644 --- a/tests/integration/standard/test_custom_protocol_handler.py +++ b/tests/integration/standard/test_custom_protocol_handler.py @@ -25,7 +25,6 @@ TestCluster, greaterthanorequalcass40, requirecassandra from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES from tests.integration.standard.utils import create_table_with_all_types, get_all_primitive_params -from six import binary_type import uuid import mock @@ -78,7 +77,7 @@ def test_custom_raw_uuid_row_results(self): session.client_protocol_handler = CustomTestRawRowType result_set = session.execute("SELECT schema_version FROM system.local") raw_value = result_set[0][0] - self.assertTrue(isinstance(raw_value, binary_type)) + self.assertTrue(isinstance(raw_value, bytes)) self.assertEqual(len(raw_value), 16) # Ensure that we get normal uuid back when we re-connect diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 39018ef5d8..86f48f88d5 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -17,7 +17,6 @@ from collections import defaultdict import difflib import logging -import six import sys import time import os @@ -1003,7 +1002,7 @@ class Ext1(Ext0): update_v = s.prepare('UPDATE system_schema.views SET extensions=? WHERE keyspace_name=? 
AND view_name=?') # extensions registered, one present # -------------------------------------- - ext_map = {Ext0.name: six.b("THA VALUE")} + ext_map = {Ext0.name: b"THA VALUE"} [(s.execute(update_t, (ext_map, ks, t)), s.execute(update_v, (ext_map, ks, v))) for _ in self.cluster.metadata.all_hosts()] # we're manipulating metadata - do it on all hosts self.cluster.refresh_table_metadata(ks, t) @@ -1025,8 +1024,8 @@ class Ext1(Ext0): # extensions registered, one present # -------------------------------------- - ext_map = {Ext0.name: six.b("THA VALUE"), - Ext1.name: six.b("OTHA VALUE")} + ext_map = {Ext0.name: b"THA VALUE", + Ext1.name: b"OTHA VALUE"} [(s.execute(update_t, (ext_map, ks, t)), s.execute(update_v, (ext_map, ks, v))) for _ in self.cluster.metadata.all_hosts()] # we're manipulating metadata - do it on all hosts self.cluster.refresh_table_metadata(ks, t) @@ -1094,7 +1093,7 @@ def test_export_schema(self): cluster = TestCluster() cluster.connect() - self.assertIsInstance(cluster.metadata.export_schema_as_string(), six.string_types) + self.assertIsInstance(cluster.metadata.export_schema_as_string(), str) cluster.shutdown() def test_export_keyspace_schema(self): @@ -1107,8 +1106,8 @@ def test_export_keyspace_schema(self): for keyspace in cluster.metadata.keyspaces: keyspace_metadata = cluster.metadata.keyspaces[keyspace] - self.assertIsInstance(keyspace_metadata.export_as_string(), six.string_types) - self.assertIsInstance(keyspace_metadata.as_cql_query(), six.string_types) + self.assertIsInstance(keyspace_metadata.export_as_string(), str) + self.assertIsInstance(keyspace_metadata.as_cql_query(), str) cluster.shutdown() def assert_equal_diff(self, received, expected): @@ -1288,8 +1287,8 @@ def test_replicas(self): cluster.connect('test3rf') - self.assertNotEqual(list(cluster.metadata.get_replicas('test3rf', six.b('key'))), []) - host = list(cluster.metadata.get_replicas('test3rf', six.b('key')))[0] + self.assertNotEqual(list(cluster.metadata.get_replicas('test3rf', b'key')), []) + host = list(cluster.metadata.get_replicas('test3rf', b'key'))[0] self.assertEqual(host.datacenter, 'dc1') self.assertEqual(host.rack, 'r1') cluster.shutdown() diff --git a/tests/integration/standard/test_query.py b/tests/integration/standard/test_query.py index fdab4e7a0a..bc05051318 100644 --- a/tests/integration/standard/test_query.py +++ b/tests/integration/standard/test_query.py @@ -35,7 +35,6 @@ import re import mock -import six log = logging.getLogger(__name__) @@ -461,10 +460,10 @@ def make_query_plan(self, working_keyspace=None, query=None): try: host = [live_hosts[self.host_index_to_use]] except IndexError as e: - six.raise_from(IndexError( + raise IndexError( 'You specified an index larger than the number of hosts. Total hosts: {}. 
Index specified: {}'.format( len(live_hosts), self.host_index_to_use - )), e) + )) from e return host diff --git a/tests/integration/standard/test_query_paging.py b/tests/integration/standard/test_query_paging.py index 8e0ca8becc..26c1ca0da6 100644 --- a/tests/integration/standard/test_query_paging.py +++ b/tests/integration/standard/test_query_paging.py @@ -19,7 +19,6 @@ import unittest from itertools import cycle, count -from six.moves import range from threading import Event from cassandra import ConsistencyLevel diff --git a/tests/integration/standard/test_single_interface.py b/tests/integration/standard/test_single_interface.py index c4fe4321bf..e836b5f428 100644 --- a/tests/integration/standard/test_single_interface.py +++ b/tests/integration/standard/test_single_interface.py @@ -14,8 +14,6 @@ import unittest -import six - from cassandra import ConsistencyLevel from cassandra.query import SimpleStatement @@ -56,7 +54,7 @@ def test_single_interface(self): broadcast_rpc_ports = [] broadcast_ports = [] self.assertEqual(len(hosts), 3) - for endpoint, host in six.iteritems(hosts): + for endpoint, host in hosts.items(): self.assertEqual(endpoint.address, host.broadcast_rpc_address) self.assertEqual(endpoint.port, host.broadcast_rpc_port) diff --git a/tests/integration/standard/test_types.py b/tests/integration/standard/test_types.py index 4329574ba6..2377129e9d 100644 --- a/tests/integration/standard/test_types.py +++ b/tests/integration/standard/test_types.py @@ -15,9 +15,9 @@ import unittest from datetime import datetime +import ipaddress import math from packaging.version import Version -import six import cassandra from cassandra import InvalidRequest @@ -60,25 +60,7 @@ def test_can_insert_blob_type_as_string(self): params = ['key1', b'blobbyblob'] query = "INSERT INTO blobstring (a, b) VALUES (%s, %s)" - # In python2, with Cassandra > 2.0, we don't treat the 'byte str' type as a blob, so we'll encode it - # as a string literal and have the following failure. - if six.PY2 and self.cql_version >= (3, 1, 0): - # Blob values can't be specified using string notation in CQL 3.1.0 and - # above which is used by default in Cassandra 2.0. - if self.cass_version >= (2, 1, 0): - msg = r'.*Invalid STRING constant \(.*?\) for "b" of type blob.*' - else: - msg = r'.*Invalid STRING constant \(.*?\) for b of type blob.*' - self.assertRaisesRegex(InvalidRequest, msg, s.execute, query, params) - return - - # In python2, with Cassandra < 2.0, we can manually encode the 'byte str' type as hex for insertion in a blob. - if six.PY2: - cass_params = [params[0], params[1].encode('hex')] - s.execute(query, cass_params) - # In python 3, the 'bytes' type is treated as a blob, so we can correctly encode it with hex notation. 
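Note: the make_query_plan hunk above swaps six.raise_from for the native raise ... from ... syntax; both chain the original exception as __cause__. A self-contained sketch of the pattern (the message text is shortened from the test above; the empty-list lookup is a hypothetical stand-in for the failing host lookup):

    try:
        try:
            [][1]  # stand-in for indexing past the end of the host list
        except IndexError as e:
            # raise X from Y is the built-in equivalent of six.raise_from(X, Y).
            raise IndexError('You specified an index larger than the number of hosts.') from e
    except IndexError as chained:
        assert isinstance(chained.__cause__, IndexError)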
- else: - s.execute(query, params) + s.execute(query, params) results = s.execute("SELECT * FROM blobstring")[0] for expected, actual in zip(params, results): @@ -176,10 +158,9 @@ def test_can_insert_primitive_datatypes(self): # verify data result = s.execute("SELECT {0} FROM alltypes WHERE zz=%s".format(single_columns_string), (key,))[0][1] compare_value = data_sample - if six.PY3: - import ipaddress - if isinstance(data_sample, ipaddress.IPv4Address) or isinstance(data_sample, ipaddress.IPv6Address): - compare_value = str(data_sample) + + if isinstance(data_sample, ipaddress.IPv4Address) or isinstance(data_sample, ipaddress.IPv6Address): + compare_value = str(data_sample) self.assertEqual(result, compare_value) # try the same thing with a prepared statement @@ -1107,7 +1088,7 @@ def _daterange_round_trip(self, to_insert, expected=None): dr = results[0].dr # sometimes this is truncated in the assertEqual output on failure; - if isinstance(expected, six.string_types): + if isinstance(expected, str): self.assertEqual(str(dr), expected) else: self.assertEqual(dr, expected or to_insert) @@ -1161,7 +1142,7 @@ def _daterange_round_trip(self, to_insert, expected=None): dr = results[0].dr # sometimes this is truncated in the assertEqual output on failure; - if isinstance(expected, six.string_types): + if isinstance(expected, str): self.assertEqual(str(dr), expected) else: self.assertEqual(dr, expected or to_insert) diff --git a/tests/integration/standard/test_udts.py b/tests/integration/standard/test_udts.py index 8cd6bc3c1b..a50f3f47de 100644 --- a/tests/integration/standard/test_udts.py +++ b/tests/integration/standard/test_udts.py @@ -15,7 +15,6 @@ import unittest from collections import namedtuple from functools import partial -import six from cassandra import InvalidRequest from cassandra.cluster import UserTypeDoesNotExist, ExecutionProfile, EXEC_PROFILE_DEFAULT @@ -287,9 +286,9 @@ def test_can_insert_udts_with_nulls(self): self.assertEqual((None, None, None, None), s.execute(select)[0].b) # also test empty strings - s.execute(insert, [User('', None, None, six.binary_type())]) + s.execute(insert, [User('', None, None, bytes())]) results = s.execute("SELECT b FROM mytable WHERE a=0") - self.assertEqual(('', None, None, six.binary_type()), results[0].b) + self.assertEqual(('', None, None, bytes()), results[0].b) c.shutdown() @@ -714,7 +713,7 @@ def test_type_alteration(self): s.execute("INSERT INTO %s (k, v) VALUES (0, {v0 : 3, v1 : 0xdeadbeef})" % (self.table_name,)) val = s.execute('SELECT v FROM %s' % self.table_name)[0][0] self.assertEqual(val['v0'], 3) - self.assertEqual(val['v1'], six.b('\xde\xad\xbe\xef')) + self.assertEqual(val['v1'], b'\xde\xad\xbe\xef') @lessthancass30 def test_alter_udt(self): diff --git a/tests/unit/advanced/cloud/test_cloud.py b/tests/unit/advanced/cloud/test_cloud.py index a7cd83a8ce..f253e70454 100644 --- a/tests/unit/advanced/cloud/test_cloud.py +++ b/tests/unit/advanced/cloud/test_cloud.py @@ -9,7 +9,6 @@ import tempfile import os import shutil -import six import unittest @@ -96,8 +95,7 @@ def clean_tmp_dir(): } # The directory is not writtable.. 
we expect a permission error - exc = PermissionError if six.PY3 else OSError - with self.assertRaises(exc): + with self.assertRaises(PermissionError): cloud.get_cloud_config(config) # With use_default_tempdir, we expect an connection refused diff --git a/tests/unit/advanced/test_graph.py b/tests/unit/advanced/test_graph.py index a98a48c82f..2870b9b1ee 100644 --- a/tests/unit/advanced/test_graph.py +++ b/tests/unit/advanced/test_graph.py @@ -17,8 +17,6 @@ import unittest -import six - from cassandra import ConsistencyLevel from cassandra.policies import RetryPolicy from cassandra.graph import (SimpleGraphStatement, GraphOptions, GraphProtocol, Result, @@ -278,7 +276,7 @@ def test_get_options(self): other = GraphOptions(**kwargs) options = base.get_options_map(other) updated = self.opt_mapping['graph_name'] - self.assertEqual(options[updated], six.b('unit_test')) + self.assertEqual(options[updated], b'unit_test') for name in (n for n in self.opt_mapping.values() if n != updated): self.assertEqual(options[name], base._graph_options[name]) @@ -288,22 +286,22 @@ def test_get_options(self): def test_set_attr(self): expected = 'test@@@@' opts = GraphOptions(graph_name=expected) - self.assertEqual(opts.graph_name, six.b(expected)) + self.assertEqual(opts.graph_name, expected.encode()) expected = 'somethingelse####' opts.graph_name = expected - self.assertEqual(opts.graph_name, six.b(expected)) + self.assertEqual(opts.graph_name, expected.encode()) # will update options with set value another = GraphOptions() self.assertIsNone(another.graph_name) another.update(opts) - self.assertEqual(another.graph_name, six.b(expected)) + self.assertEqual(another.graph_name, expected.encode()) opts.graph_name = None self.assertIsNone(opts.graph_name) # will not update another with its set-->unset value another.update(opts) - self.assertEqual(another.graph_name, six.b(expected)) # remains unset + self.assertEqual(another.graph_name, expected.encode()) # remains unset opt_map = another.get_options_map(opts) self.assertEqual(opt_map, another._graph_options) @@ -318,7 +316,7 @@ def _verify_api_params(self, opts, api_params): self.assertEqual(len(opts._graph_options), len(api_params)) for name, value in api_params.items(): try: - value = six.b(value) + value = value.encode() except: pass # already bytes self.assertEqual(getattr(opts, name), value) @@ -335,8 +333,8 @@ def test_consistency_levels(self): # mapping from base opt_map = opts.get_options_map() - self.assertEqual(opt_map['graph-read-consistency'], six.b(ConsistencyLevel.value_to_name[read_cl])) - self.assertEqual(opt_map['graph-write-consistency'], six.b(ConsistencyLevel.value_to_name[write_cl])) + self.assertEqual(opt_map['graph-read-consistency'], ConsistencyLevel.value_to_name[read_cl].encode()) + self.assertEqual(opt_map['graph-write-consistency'], ConsistencyLevel.value_to_name[write_cl].encode()) # empty by default new_opts = GraphOptions() @@ -346,8 +344,8 @@ def test_consistency_levels(self): # set from other opt_map = new_opts.get_options_map(opts) - self.assertEqual(opt_map['graph-read-consistency'], six.b(ConsistencyLevel.value_to_name[read_cl])) - self.assertEqual(opt_map['graph-write-consistency'], six.b(ConsistencyLevel.value_to_name[write_cl])) + self.assertEqual(opt_map['graph-read-consistency'], ConsistencyLevel.value_to_name[read_cl].encode()) + self.assertEqual(opt_map['graph-write-consistency'], ConsistencyLevel.value_to_name[write_cl].encode()) def test_graph_source_convenience_attributes(self): opts = GraphOptions() diff --git 
a/tests/unit/cqlengine/test_connection.py b/tests/unit/cqlengine/test_connection.py index 962ee06b52..56136b6e8b 100644 --- a/tests/unit/cqlengine/test_connection.py +++ b/tests/unit/cqlengine/test_connection.py @@ -14,8 +14,6 @@ import unittest -import six - from cassandra.cluster import _ConfigMode from cassandra.cqlengine import connection from cassandra.query import dict_factory diff --git a/tests/unit/io/utils.py b/tests/unit/io/utils.py index 09175ce8c1..0e8eec52aa 100644 --- a/tests/unit/io/utils.py +++ b/tests/unit/io/utils.py @@ -26,8 +26,7 @@ import random from functools import wraps from itertools import cycle -import six -from six import binary_type, BytesIO +from io import BytesIO from mock import Mock, MagicMock import errno @@ -202,7 +201,7 @@ def set_socket(self, connection, obj): return setattr(connection, self.socket_attr_name, obj) def make_header_prefix(self, message_class, version=2, stream_id=0): - return binary_type().join(map(uint8_pack, [ + return bytes().join(map(uint8_pack, [ 0xff & (HEADER_DIRECTION_TO_CLIENT | version), 0, # flags (compression) stream_id, @@ -230,7 +229,7 @@ def make_error_body(self, code, msg): write_string(buf, msg) return buf.getvalue() - def make_msg(self, header, body=binary_type()): + def make_msg(self, header, body=bytes()): return header + uint32_pack(len(body)) + body def test_successful_connection(self): @@ -289,7 +288,7 @@ def recv_side_effect(*args): c.process_io_buffer = Mock() def chunk(size): - return six.b('a') * size + return b'a' * size buf_size = c.in_buffer_size @@ -436,7 +435,7 @@ def test_partial_header_read(self): self.get_socket(c).recv.return_value = message[1:] c.handle_read(*self.null_handle_function_args) - self.assertEqual(six.binary_type(), c._io_buffer.io_buffer.getvalue()) + self.assertEqual(bytes(), c._io_buffer.io_buffer.getvalue()) # let it write out a StartupMessage c.handle_write(*self.null_handle_function_args) @@ -463,7 +462,7 @@ def test_partial_message_read(self): # ... then read in the rest self.get_socket(c).recv.return_value = message[9:] c.handle_read(*self.null_handle_function_args) - self.assertEqual(six.binary_type(), c._io_buffer.io_buffer.getvalue()) + self.assertEqual(bytes(), c._io_buffer.io_buffer.getvalue()) # let it write out a StartupMessage c.handle_write(*self.null_handle_function_args) @@ -499,7 +498,7 @@ def test_mixed_message_and_buffer_sizes(self): for i in range(1, 15): c.process_io_buffer.reset_mock() c._io_buffer._io_buffer = io.BytesIO() - message = io.BytesIO(six.b('a') * (2**i)) + message = io.BytesIO(b'a' * (2**i)) def recv_side_effect(*args): if random.randint(1,10) % 3 == 0: diff --git a/tests/unit/test_auth.py b/tests/unit/test_auth.py index 68cce526e7..0a2427c7ff 100644 --- a/tests/unit/test_auth.py +++ b/tests/unit/test_auth.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
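Note: the io-utils hunks above replace six's byte helpers with their native spellings: six.BytesIO becomes io.BytesIO, six.binary_type() becomes bytes(), and six.b('a') becomes the literal b'a'. A standard-library sketch of the same header-prefix pattern (struct.pack('>B', ...) stands in here for the driver's uint8_pack helper):

    from io import BytesIO
    from struct import pack

    # bytes() is the native spelling of six.binary_type(); joining packed
    # uint8 values builds a header prefix, as make_header_prefix does above.
    header = bytes().join(pack('>B', b) for b in (0x84, 0x00, 0x00, 0x08))
    assert header == b'\x84\x00\x00\x08'
    assert BytesIO(header).read(1) == b'\x84'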
-import six from cassandra.auth import PlainTextAuthenticator import unittest @@ -24,6 +23,6 @@ class TestPlainTextAuthenticator(unittest.TestCase): def test_evaluate_challenge_with_unicode_data(self): authenticator = PlainTextAuthenticator("johnӁ", "doeӁ") self.assertEqual( - authenticator.evaluate_challenge(six.ensure_binary('PLAIN-START')), - six.ensure_binary("\x00johnӁ\x00doeӁ") + authenticator.evaluate_challenge(b'PLAIN-START'), + "\x00johnӁ\x00doeӁ".encode('utf-8') ) diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index 16487397c2..3334e650a5 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -14,7 +14,6 @@ import unittest import logging -import six from mock import patch, Mock @@ -303,7 +302,7 @@ def test_default_profile(self): rf = session.execute_async("query", execution_profile='non-default') self._verify_response_future_profile(rf, non_default_profile) - for name, ep in six.iteritems(cluster.profile_manager.profiles): + for name, ep in cluster.profile_manager.profiles.items(): self.assertEqual(ep, session.get_execution_profile(name)) # invalid ep diff --git a/tests/unit/test_concurrent.py b/tests/unit/test_concurrent.py index 9f67531a3c..3e84220b27 100644 --- a/tests/unit/test_concurrent.py +++ b/tests/unit/test_concurrent.py @@ -19,7 +19,7 @@ from mock import Mock import time import threading -from six.moves.queue import PriorityQueue +from queue import PriorityQueue import sys import platform diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py index bc6749a477..1d81376d4a 100644 --- a/tests/unit/test_connection.py +++ b/tests/unit/test_connection.py @@ -14,8 +14,7 @@ import unittest from mock import Mock, ANY, call, patch -import six -from six import BytesIO +from io import BytesIO import time from threading import Lock @@ -41,14 +40,14 @@ def make_connection(self): def make_header_prefix(self, message_class, version=Connection.protocol_version, stream_id=0): if Connection.protocol_version < 3: - return six.binary_type().join(map(uint8_pack, [ + return bytes().join(map(uint8_pack, [ 0xff & (HEADER_DIRECTION_TO_CLIENT | version), 0, # flags (compression) stream_id, message_class.opcode # opcode ])) else: - return six.binary_type().join(map(uint8_pack, [ + return bytes().join(map(uint8_pack, [ 0xff & (HEADER_DIRECTION_TO_CLIENT | version), 0, # flags (compression) 0, # MSB for v3+ stream diff --git a/tests/unit/test_control_connection.py b/tests/unit/test_control_connection.py index dc5b37d799..b389b1851b 100644 --- a/tests/unit/test_control_connection.py +++ b/tests/unit/test_control_connection.py @@ -14,8 +14,6 @@ import unittest -import six - from concurrent.futures import ThreadPoolExecutor from mock import Mock, ANY, call @@ -54,7 +52,7 @@ def __init__(self): def get_host(self, endpoint_or_address, port=None): if not isinstance(endpoint_or_address, EndPoint): - for host in six.itervalues(self.hosts): + for host in self.hosts.values(): if (host.address == endpoint_or_address and (port is None or host.broadcast_rpc_port is None or host.broadcast_rpc_port == port)): return host diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py index b0a8b63b16..94fed13455 100644 --- a/tests/unit/test_metadata.py +++ b/tests/unit/test_metadata.py @@ -17,7 +17,6 @@ import logging from mock import Mock import os -import six import timeit import cassandra @@ -485,11 +484,11 @@ def test_murmur3_c(self): raise unittest.SkipTest('The cmurmur3 extension is not available') def _verify_hash(self, fn): - 
self.assertEqual(fn(six.b('123')), -7468325962851647638) + self.assertEqual(fn(b'123'), -7468325962851647638) self.assertEqual(fn(b'\x00\xff\x10\xfa\x99' * 10), 5837342703291459765) self.assertEqual(fn(b'\xfe' * 8), -8927430733708461935) self.assertEqual(fn(b'\x10' * 8), 1446172840243228796) - self.assertEqual(fn(six.b(str(cassandra.metadata.MAX_LONG))), 7162290910810015547) + self.assertEqual(fn(str(cassandra.metadata.MAX_LONG).encode()), 7162290910810015547) class MD5TokensTest(unittest.TestCase): @@ -504,28 +503,28 @@ def test_md5_tokens(self): class BytesTokensTest(unittest.TestCase): def test_bytes_tokens(self): - bytes_token = BytesToken(unhexlify(six.b('01'))) - self.assertEqual(bytes_token.value, six.b('\x01')) + bytes_token = BytesToken(unhexlify(b'01')) + self.assertEqual(bytes_token.value, b'\x01') self.assertEqual(str(bytes_token), "" % bytes_token.value) self.assertEqual(bytes_token.hash_fn('123'), '123') self.assertEqual(bytes_token.hash_fn(123), 123) self.assertEqual(bytes_token.hash_fn(str(cassandra.metadata.MAX_LONG)), str(cassandra.metadata.MAX_LONG)) def test_from_string(self): - from_unicode = BytesToken.from_string(six.text_type('0123456789abcdef')) - from_bin = BytesToken.from_string(six.b('0123456789abcdef')) + from_unicode = BytesToken.from_string('0123456789abcdef') + from_bin = BytesToken.from_string(b'0123456789abcdef') self.assertEqual(from_unicode, from_bin) - self.assertIsInstance(from_unicode.value, six.binary_type) - self.assertIsInstance(from_bin.value, six.binary_type) + self.assertIsInstance(from_unicode.value, bytes) + self.assertIsInstance(from_bin.value, bytes) def test_comparison(self): - tok = BytesToken.from_string(six.text_type('0123456789abcdef')) + tok = BytesToken.from_string('0123456789abcdef') token_high_order = uint16_unpack(tok.value[0:2]) self.assertLess(BytesToken(uint16_pack(token_high_order - 1)), tok) self.assertGreater(BytesToken(uint16_pack(token_high_order + 1)), tok) def test_comparison_unicode(self): - value = six.b('\'_-()"\xc2\xac') + value = b'\'_-()"\xc2\xac' t0 = BytesToken(value) t1 = BytesToken.from_string('00') self.assertGreater(t0, t1) @@ -642,7 +641,7 @@ class UnicodeIdentifiersTests(unittest.TestCase): Looking for encoding errors like PYTHON-447 """ - name = six.text_type(b'\'_-()"\xc2\xac'.decode('utf-8')) + name = b'\'_-()"\xc2\xac'.decode('utf-8') def test_keyspace_name(self): km = KeyspaceMetadata(self.name, False, 'SimpleStrategy', {'replication_factor': 1}) diff --git a/tests/unit/test_orderedmap.py b/tests/unit/test_orderedmap.py index 9ca5699204..5d99fc74a8 100644 --- a/tests/unit/test_orderedmap.py +++ b/tests/unit/test_orderedmap.py @@ -16,7 +16,6 @@ from cassandra.util import OrderedMap, OrderedMapSerializedKey from cassandra.cqltypes import EMPTY, UTF8Type, lookup_casstype -import six class OrderedMapTest(unittest.TestCase): def test_init(self): @@ -118,11 +117,11 @@ def test_iter(self): itr = iter(om) self.assertEqual(sum([1 for _ in itr]), len(keys)) - self.assertRaises(StopIteration, six.next, itr) + self.assertRaises(StopIteration, next, itr) self.assertEqual(list(iter(om)), keys) - self.assertEqual(list(six.iteritems(om)), items) - self.assertEqual(list(six.itervalues(om)), values) + self.assertEqual(list(om.items()), items) + self.assertEqual(list(om.values()), values) def test_len(self): self.assertEqual(len(OrderedMap()), 0) diff --git a/tests/unit/test_parameter_binding.py b/tests/unit/test_parameter_binding.py index 8820114dc3..78f3898e01 100644 --- a/tests/unit/test_parameter_binding.py +++ 
b/tests/unit/test_parameter_binding.py @@ -21,9 +21,6 @@ from cassandra.cqltypes import Int32Type from cassandra.util import OrderedDict -from six.moves import xrange -import six - class ParamBindingTest(unittest.TestCase): @@ -40,7 +37,7 @@ def test_sequence_param(self): self.assertEqual(result, "(1, 'a', 2.0)") def test_generator_param(self): - result = bind_params("%s", ((i for i in xrange(3)),), Encoder()) + result = bind_params("%s", ((i for i in range(3)),), Encoder()) self.assertEqual(result, "[0, 1, 2]") def test_none_param(self): @@ -149,7 +146,7 @@ def test_missing_value(self): def test_extra_value(self): self.bound.bind({'rk0': 0, 'rk1': 0, 'ck0': 0, 'v0': 0, 'should_not_be_here': 123}) # okay to have extra keys in dict - self.assertEqual(self.bound.values, [six.b('\x00') * 4] * 4) # four encoded zeros + self.assertEqual(self.bound.values, [b'\x00' * 4] * 4) # four encoded zeros self.assertRaises(ValueError, self.bound.bind, (0, 0, 0, 0, 123)) def test_values_none(self): diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py index db9eae6324..877731dc08 100644 --- a/tests/unit/test_policies.py +++ b/tests/unit/test_policies.py @@ -17,8 +17,7 @@ from itertools import islice, cycle from mock import Mock, patch, call from random import randint -import six -from six.moves._thread import LockType +from _thread import LockType import sys import struct from threading import Thread @@ -37,8 +36,6 @@ from cassandra.connection import DefaultEndPoint, UnixSocketEndPoint from cassandra.query import Statement -from six.moves import xrange - class LoadBalancingPolicyTest(unittest.TestCase): def test_non_implemented(self): @@ -75,7 +72,7 @@ def test_multiple_query_plans(self): hosts = [0, 1, 2, 3] policy = RoundRobinPolicy() policy.populate(None, hosts) - for i in xrange(20): + for i in range(20): qplan = list(policy.make_query_plan()) self.assertEqual(sorted(qplan), hosts) @@ -121,17 +118,17 @@ def test_thread_safety_during_modification(self): def check_query_plan(): try: - for i in xrange(100): + for i in range(100): list(policy.make_query_plan()) except Exception as exc: errors.append(exc) def host_up(): - for i in xrange(1000): + for i in range(1000): policy.on_up(randint(0, 99)) def host_down(): - for i in xrange(1000): + for i in range(1000): policy.on_down(randint(0, 99)) threads = [] @@ -142,7 +139,7 @@ def host_down(): # make the GIL switch after every instruction, maximizing # the chance of race conditions - check = six.PY2 or '__pypy__' in sys.builtin_module_names + check = '__pypy__' in sys.builtin_module_names if check: original_interval = sys.getcheckinterval() else: diff --git a/tests/unit/test_protocol.py b/tests/unit/test_protocol.py index 0f251ffc0e..eec9d73ca4 100644 --- a/tests/unit/test_protocol.py +++ b/tests/unit/test_protocol.py @@ -14,7 +14,6 @@ import unittest -import six from mock import Mock from cassandra import ProtocolVersion, UnsupportedOperation diff --git a/tests/unit/test_query.py b/tests/unit/test_query.py index 2a2901aaff..8a3f00fa9d 100644 --- a/tests/unit/test_query.py +++ b/tests/unit/test_query.py @@ -14,8 +14,6 @@ import unittest -import six - from cassandra.query import BatchStatement, SimpleStatement @@ -25,7 +23,7 @@ class BatchStatementTest(unittest.TestCase): def test_clear(self): keyspace = 'keyspace' routing_key = 'routing_key' - custom_payload = {'key': six.b('value')} + custom_payload = {'key': b'value'} ss = SimpleStatement('whatever', keyspace=keyspace, routing_key=routing_key, custom_payload=custom_payload) diff --git 
a/tests/unit/test_response_future.py b/tests/unit/test_response_future.py index 82da9e0049..ef667d081b 100644 --- a/tests/unit/test_response_future.py +++ b/tests/unit/test_response_future.py @@ -17,7 +17,6 @@ from collections import deque from threading import RLock -import six from mock import Mock, MagicMock, ANY from cassandra import ConsistencyLevel, Unavailable, SchemaTargetType, SchemaChangeType, OperationTimedOut diff --git a/tests/unit/test_segment.py b/tests/unit/test_segment.py index f794b38b1d..0d0f146c16 100644 --- a/tests/unit/test_segment.py +++ b/tests/unit/test_segment.py @@ -14,7 +14,7 @@ import unittest -import six +from io import BytesIO from cassandra import DriverException from cassandra.segment import Segment, CrcException @@ -22,8 +22,6 @@ def to_bits(b): - if six.PY2: - b = six.byte2int(b) return '{:08b}'.format(b) class SegmentCodecTest(unittest.TestCase): @@ -50,7 +48,7 @@ def _header_to_bits(data): return bits[7:24] + bits[6:7] + bits[:6] def test_encode_uncompressed_header(self): - buffer = six.BytesIO() + buffer = BytesIO() segment_codec_no_compression.encode_header(buffer, len(self.small_msg), -1, True) self.assertEqual(buffer.tell(), 6) self.assertEqual( @@ -59,7 +57,7 @@ def test_encode_uncompressed_header(self): @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_encode_compressed_header(self): - buffer = six.BytesIO() + buffer = BytesIO() compressed_length = len(segment_codec_lz4.compress(self.small_msg)) segment_codec_lz4.encode_header(buffer, compressed_length, len(self.small_msg), True) @@ -69,7 +67,7 @@ def test_encode_compressed_header(self): "{:017b}".format(compressed_length) + "00000000000110010" + "1" + "00000") def test_encode_uncompressed_header_with_max_payload(self): - buffer = six.BytesIO() + buffer = BytesIO() segment_codec_no_compression.encode_header(buffer, len(self.max_msg), -1, True) self.assertEqual(buffer.tell(), 6) self.assertEqual( @@ -77,13 +75,13 @@ def test_encode_uncompressed_header_with_max_payload(self): "11111111111111111" + "1" + "000000") def test_encode_header_fails_if_payload_too_big(self): - buffer = six.BytesIO() + buffer = BytesIO() for codec in [c for c in [segment_codec_no_compression, segment_codec_lz4] if c is not None]: with self.assertRaises(DriverException): codec.encode_header(buffer, len(self.large_msg), -1, False) def test_encode_uncompressed_header_not_self_contained_msg(self): - buffer = six.BytesIO() + buffer = BytesIO() # simulate the first chunk with the max size segment_codec_no_compression.encode_header(buffer, len(self.max_msg), -1, False) self.assertEqual(buffer.tell(), 6) @@ -95,7 +93,7 @@ def test_encode_uncompressed_header_not_self_contained_msg(self): @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_encode_compressed_header_with_max_payload(self): - buffer = six.BytesIO() + buffer = BytesIO() compressed_length = len(segment_codec_lz4.compress(self.max_msg)) segment_codec_lz4.encode_header(buffer, compressed_length, len(self.max_msg), True) self.assertEqual(buffer.tell(), 8) @@ -105,7 +103,7 @@ def test_encode_compressed_header_with_max_payload(self): @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_encode_compressed_header_not_self_contained_msg(self): - buffer = six.BytesIO() + buffer = BytesIO() # simulate the first chunk with the max size compressed_length = len(segment_codec_lz4.compress(self.max_msg)) segment_codec_lz4.encode_header(buffer, compressed_length, len(self.max_msg), False) @@ -118,7 +116,7 @@ def 
test_encode_compressed_header_not_self_contained_msg(self): "00000")) def test_decode_uncompressed_header(self): - buffer = six.BytesIO() + buffer = BytesIO() segment_codec_no_compression.encode_header(buffer, len(self.small_msg), -1, True) buffer.seek(0) header = segment_codec_no_compression.decode_header(buffer) @@ -128,7 +126,7 @@ def test_decode_uncompressed_header(self): @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_decode_compressed_header(self): - buffer = six.BytesIO() + buffer = BytesIO() compressed_length = len(segment_codec_lz4.compress(self.small_msg)) segment_codec_lz4.encode_header(buffer, compressed_length, len(self.small_msg), True) buffer.seek(0) @@ -138,7 +136,7 @@ def test_decode_compressed_header(self): self.assertEqual(header.is_self_contained, True) def test_decode_header_fails_if_corrupted(self): - buffer = six.BytesIO() + buffer = BytesIO() segment_codec_no_compression.encode_header(buffer, len(self.small_msg), -1, True) # corrupt one byte buffer.seek(buffer.tell()-1) @@ -149,7 +147,7 @@ def test_decode_header_fails_if_corrupted(self): segment_codec_no_compression.decode_header(buffer) def test_decode_uncompressed_self_contained_segment(self): - buffer = six.BytesIO() + buffer = BytesIO() segment_codec_no_compression.encode(buffer, self.small_msg) buffer.seek(0) @@ -163,7 +161,7 @@ def test_decode_uncompressed_self_contained_segment(self): @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_decode_compressed_self_contained_segment(self): - buffer = six.BytesIO() + buffer = BytesIO() segment_codec_lz4.encode(buffer, self.small_msg) buffer.seek(0) @@ -176,7 +174,7 @@ def test_decode_compressed_self_contained_segment(self): self.assertEqual(segment.payload, self.small_msg) def test_decode_multi_segments(self): - buffer = six.BytesIO() + buffer = BytesIO() segment_codec_no_compression.encode(buffer, self.large_msg) buffer.seek(0) @@ -194,7 +192,7 @@ def test_decode_multi_segments(self): @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_decode_fails_if_corrupted(self): - buffer = six.BytesIO() + buffer = BytesIO() segment_codec_lz4.encode(buffer, self.small_msg) buffer.seek(buffer.tell()-1) buffer.write(b'0') @@ -205,7 +203,7 @@ def test_decode_fails_if_corrupted(self): @unittest.skipUnless(segment_codec_lz4, ' lz4 not installed') def test_decode_tiny_msg_not_compressed(self): - buffer = six.BytesIO() + buffer = BytesIO() segment_codec_lz4.encode(buffer, b'b') buffer.seek(0) header = segment_codec_lz4.decode_header(buffer) diff --git a/tests/unit/test_timestamps.py b/tests/unit/test_timestamps.py index fc1be071ad..ef8ac36f7b 100644 --- a/tests/unit/test_timestamps.py +++ b/tests/unit/test_timestamps.py @@ -15,7 +15,6 @@ import unittest import mock -import six from cassandra import timestamps from threading import Thread, Lock @@ -106,10 +105,7 @@ def assertLastCallArgRegex(self, call, pattern): last_warn_args, last_warn_kwargs = call self.assertEqual(len(last_warn_args), 1) self.assertEqual(len(last_warn_kwargs), 0) - six.assertRegex(self, - last_warn_args[0], - pattern, - ) + self.assertRegex(last_warn_args[0], pattern) def test_basic_log_content(self): """ diff --git a/tests/unit/test_types.py b/tests/unit/test_types.py index af3b327ef8..b77c9dcdb4 100644 --- a/tests/unit/test_types.py +++ b/tests/unit/test_types.py @@ -18,8 +18,6 @@ import time from binascii import unhexlify -import six - import cassandra from cassandra import util from cassandra.cqltypes import ( @@ -166,7 +164,7 @@ def __init__(self, 
subtypes, names): @classmethod def apply_parameters(cls, subtypes, names): - return cls(subtypes, [unhexlify(six.b(name)) if name is not None else name for name in names]) + return cls(subtypes, [unhexlify(name.encode()) if name is not None else name for name in names]) class BarType(FooType): typename = 'org.apache.cassandra.db.marshal.BarType' @@ -536,8 +534,8 @@ class no_bounds_object(object): self.assertRaises(ValueError, DateRangeType.serialize, no_bounds_object, 5) def test_serialized_value_round_trip(self): - vals = [six.b('\x01\x00\x00\x01%\xe9a\xf9\xd1\x06\x00\x00\x01v\xbb>o\xff\x00'), - six.b('\x01\x00\x00\x00\xdcm\x03-\xd1\x06\x00\x00\x01v\xbb>o\xff\x00')] + vals = [b'\x01\x00\x00\x01%\xe9a\xf9\xd1\x06\x00\x00\x01v\xbb>o\xff\x00', + b'\x01\x00\x00\x00\xdcm\x03-\xd1\x06\x00\x00\x01v\xbb>o\xff\x00'] for serialized in vals: self.assertEqual( serialized,
diff --git a/tox.ini b/tox.ini index 6d94e11247..52db2b0c95 100644 --- a/tox.ini +++ b/tox.ini @@ -4,7 +4,6 @@ envlist = py{27,35,36,37,38},pypy [base] deps = nose mock<=1.0.1 - six packaging cython eventlet
From 246786450cbe6f906ccf369f209175f00acffa2b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Karol=20Bary=C5=82a?=
Date: Wed, 19 Jul 2023 18:08:12 +0200
Subject: [PATCH 253/551] Remove mentions / workarounds for unsupported Python versions

There are some stale mentions in docs / comments about Python versions that are no longer supported. There are also some workarounds to make the driver work with those versions. This commit removes all mentions and workarounds that I was able to find.

--- CONTRIBUTING.rst | 1 - cassandra/encoder.py | 3 - cassandra/util.py | 246 ++-------------------------------- docs/installation.rst | 2 +- tests/integration/__init__.py | 1 - 5 files changed, 10 insertions(+), 243 deletions(-)
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index cdd742c063..e5da81d74f 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -26,7 +26,6 @@ To protect the community, all contributors are required to `sign the DataStax Co Design and Implementation Guidelines ------------------------------------ -- We support Python 2.7+, so any changes must work in any of these runtimes (we use ``six``, ``futures``, and some internal backports for compatability) - We have integrations (notably Cassandra cqlsh) that require pure Python and minimal external dependencies. We try to avoid new external dependencies. Where compiled extensions are concerned, there should always be a pure Python fallback implementation. - This project follows `semantic versioning `_, so breaking API changes will only be introduced in major versions. - Legacy ``cqlengine`` has varying degrees of overreaching client-side validation. Going forward, we will avoid client validation where server feedback is adequate and not overly expensive.
diff --git a/cassandra/encoder.py b/cassandra/encoder.py index 188739b00f..31d90549f4 100644 --- a/cassandra/encoder.py +++ b/cassandra/encoder.py @@ -34,9 +34,6 @@ def cql_quote(term): - # The ordering of this method is important for the result of this method to - # be a native str type (for both Python 2 and 3) - if isinstance(term, str): return "'%s'" % str(term).replace("'", "''") else:
diff --git a/cassandra/util.py b/cassandra/util.py index 3109dafa4c..06d338f2e1 100644 --- a/cassandra/util.py +++ b/cassandra/util.py @@ -13,17 +13,22 @@ # limitations under the License.
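Note: one of the larger workarounds this commit removes is the vendored OrderedDict: the class has shipped in the collections module since Python 2.7, so the plain import at the top of util.py replaces the whole backport deleted below. A quick standard-library check:

    from collections import OrderedDict

    # The stdlib class preserves insertion order (as does plain dict from
    # CPython 3.7 onwards), making the vendored copy below dead code.
    om = OrderedDict([('a', 1), ('b', 2)])
    om['c'] = 3
    assert list(om) == ['a', 'b', 'c']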
from __future__ import with_statement +from _weakref import ref import calendar +from collections import OrderedDict from collections.abc import Mapping import datetime from functools import total_ordering -import logging from itertools import chain +import keyword +import logging import pickle import random import re -import uuid +import socket import sys +import time +import uuid _HAS_GEOMET = True try: @@ -213,147 +218,6 @@ def _resolve_contact_points_to_string_map(contact_points): ) -try: - from collections import OrderedDict -except ImportError: - # OrderedDict from Python 2.7+ - - # Copyright (c) 2009 Raymond Hettinger - # - # Permission is hereby granted, free of charge, to any person - # obtaining a copy of this software and associated documentation files - # (the "Software"), to deal in the Software without restriction, - # including without limitation the rights to use, copy, modify, merge, - # publish, distribute, sublicense, and/or sell copies of the Software, - # and to permit persons to whom the Software is furnished to do so, - # subject to the following conditions: - # - # The above copyright notice and this permission notice shall be - # included in all copies or substantial portions of the Software. - # - # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - # OTHER DEALINGS IN THE SOFTWARE. - from UserDict import DictMixin - - class OrderedDict(dict, DictMixin): # noqa - """ A dictionary which maintains the insertion order of keys. """ - - def __init__(self, *args, **kwds): - """ A dictionary which maintains the insertion order of keys. 
""" - - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__end - except AttributeError: - self.clear() - self.update(*args, **kwds) - - def clear(self): - self.__end = end = [] - end += [None, end, end] # sentinel node for doubly linked list - self.__map = {} # key --> [key, prev, next] - dict.clear(self) - - def __setitem__(self, key, value): - if key not in self: - end = self.__end - curr = end[1] - curr[2] = end[1] = self.__map[key] = [key, curr, end] - dict.__setitem__(self, key, value) - - def __delitem__(self, key): - dict.__delitem__(self, key) - key, prev, next = self.__map.pop(key) - prev[2] = next - next[1] = prev - - def __iter__(self): - end = self.__end - curr = end[2] - while curr is not end: - yield curr[0] - curr = curr[2] - - def __reversed__(self): - end = self.__end - curr = end[1] - while curr is not end: - yield curr[0] - curr = curr[1] - - def popitem(self, last=True): - if not self: - raise KeyError('dictionary is empty') - if last: - key = next(reversed(self)) - else: - key = next(iter(self)) - value = self.pop(key) - return key, value - - def __reduce__(self): - items = [[k, self[k]] for k in self] - tmp = self.__map, self.__end - del self.__map, self.__end - inst_dict = vars(self).copy() - self.__map, self.__end = tmp - if inst_dict: - return (self.__class__, (items,), inst_dict) - return self.__class__, (items,) - - def keys(self): - return list(self) - - setdefault = DictMixin.setdefault - update = DictMixin.update - pop = DictMixin.pop - values = DictMixin.values - items = DictMixin.items - iterkeys = DictMixin.iterkeys - itervalues = DictMixin.itervalues - iteritems = DictMixin.iteritems - - def __repr__(self): - if not self: - return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, self.items()) - - def copy(self): - return self.__class__(self) - - @classmethod - def fromkeys(cls, iterable, value=None): - d = cls() - for key in iterable: - d[key] = value - return d - - def __eq__(self, other): - if isinstance(other, OrderedDict): - if len(self) != len(other): - return False - for p, q in zip(self.items(), other.items()): - if p != q: - return False - return True - return dict.__eq__(self, other) - - def __ne__(self, other): - return not self == other - - -# WeakSet from Python 2.7+ (https://code.google.com/p/weakrefset) - -from _weakref import ref - - class _IterationGuard(object): # This context manager registers itself in the current iterators of the # weak container, such as to delay all removals until the context manager @@ -916,10 +780,6 @@ def _serialize_key(self, key): return self.cass_key_type.serialize(key, self.protocol_version) -import datetime -import time - - @total_ordering class Time(object): ''' @@ -1145,97 +1005,9 @@ def __str__(self): # If we overflow datetime.[MIN|MAX] return str(self.days_from_epoch) -import socket -if hasattr(socket, 'inet_pton'): - inet_pton = socket.inet_pton - inet_ntop = socket.inet_ntop -else: - """ - Windows doesn't have socket.inet_pton and socket.inet_ntop until Python 3.4 - This is an alternative impl using ctypes, based on this win_inet_pton project: - https://github.com/hickeroar/win_inet_pton - """ - import ctypes - - class sockaddr(ctypes.Structure): - """ - Shared struct for ipv4 and ipv6. - - https://msdn.microsoft.com/en-us/library/windows/desktop/ms740496(v=vs.85).aspx - - ``__pad1`` always covers the port. 
- When being used for ``sockaddr_in6``, ``ipv4_addr`` actually covers ``sin6_flowinfo``, resulting - in proper alignment for ``ipv6_addr``. - """ - _fields_ = [("sa_family", ctypes.c_short), - ("__pad1", ctypes.c_ushort), - ("ipv4_addr", ctypes.c_byte * 4), - ("ipv6_addr", ctypes.c_byte * 16), - ("__pad2", ctypes.c_ulong)] - - if hasattr(ctypes, 'windll'): - WSAStringToAddressA = ctypes.windll.ws2_32.WSAStringToAddressA - WSAAddressToStringA = ctypes.windll.ws2_32.WSAAddressToStringA - else: - def not_windows(*args): - raise OSError("IPv6 addresses cannot be handled on Windows. " - "Missing ctypes.windll") - WSAStringToAddressA = not_windows - WSAAddressToStringA = not_windows - - def inet_pton(address_family, ip_string): - if address_family == socket.AF_INET: - return socket.inet_aton(ip_string) - - addr = sockaddr() - addr.sa_family = address_family - addr_size = ctypes.c_int(ctypes.sizeof(addr)) - - if WSAStringToAddressA( - ip_string, - address_family, - None, - ctypes.byref(addr), - ctypes.byref(addr_size) - ) != 0: - raise socket.error(ctypes.FormatError()) - - if address_family == socket.AF_INET6: - return ctypes.string_at(addr.ipv6_addr, 16) - - raise socket.error('unknown address family') - - def inet_ntop(address_family, packed_ip): - if address_family == socket.AF_INET: - return socket.inet_ntoa(packed_ip) - - addr = sockaddr() - addr.sa_family = address_family - addr_size = ctypes.c_int(ctypes.sizeof(addr)) - ip_string = ctypes.create_string_buffer(128) - ip_string_size = ctypes.c_int(ctypes.sizeof(ip_string)) - - if address_family == socket.AF_INET6: - if len(packed_ip) != ctypes.sizeof(addr.ipv6_addr): - raise socket.error('packed IP wrong length for inet_ntoa') - ctypes.memmove(addr.ipv6_addr, packed_ip, 16) - else: - raise socket.error('unknown address family') - - if WSAAddressToStringA( - ctypes.byref(addr), - addr_size, - None, - ip_string, - ctypes.byref(ip_string_size) - ) != 0: - raise socket.error(ctypes.FormatError()) - - return ip_string[:ip_string_size.value - 1] - - -import keyword +inet_pton = socket.inet_pton +inet_ntop = socket.inet_ntop # similar to collections.namedtuple, reproduced here because Python 2.6 did not have the rename logic diff --git a/docs/installation.rst b/docs/installation.rst index 64e00c8c40..1cb67cf130 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -3,7 +3,7 @@ Installation Supported Platforms ------------------- -Python 2.7, 3.5, 3.6, 3.7 and 3.8 are supported. Both CPython (the standard Python +Python versions 3.6-3.11 are supported. Both CPython (the standard Python implementation) and `PyPy `_ are supported and tested. Linux, OSX, and Windows are supported. 
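The removed ctypes shim is no longer needed because every supported interpreter ships `socket.inet_pton`/`socket.inet_ntop` natively; Windows gained both in Python 3.4, as the deleted comment notes. A minimal sketch of the round-trip the new aliases delegate to:

    import socket

    # IPv4: pack a dotted-quad string to 4 bytes and back.
    packed_v4 = socket.inet_pton(socket.AF_INET, "127.0.0.1")
    assert socket.inet_ntop(socket.AF_INET, packed_v4) == "127.0.0.1"

    # IPv6: pack to 16 bytes and back; this is the case the old
    # WSAStringToAddressA-based fallback existed for on Windows.
    packed_v6 = socket.inet_pton(socket.AF_INET6, "::1")
    assert socket.inet_ntop(socket.AF_INET6, packed_v6) == "::1"
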
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
index 54358d79b4..9928dfb7e2 100644
--- a/tests/integration/__init__.py
+++ b/tests/integration/__init__.py
@@ -392,7 +392,6 @@ def _id_and_mark(f):
 incorrect_test = lambda reason='This test seems to be incorrect and should be fixed', *args, **kwargs: pytest.mark.xfail(reason=reason, *args, **kwargs)
 
 pypy = unittest.skipUnless(platform.python_implementation() == "PyPy", "Test is skipped unless it's on PyPy")
-notpy3 = unittest.skipIf(sys.version_info >= (3, 0), "Test not applicable for Python 3.x runtime")
 requiresmallclockgranularity = unittest.skipIf("Windows" in platform.system() or "asyncore" in EVENT_LOOP_MANAGER,
                                                "This test is not suitable for environments with large clock granularity")
 requiressimulacron = unittest.skipIf(SIMULACRON_JAR is None or
                                      CASSANDRA_VERSION < Version("2.1"), "Simulacron jar hasn't been specified or C* version is 2.0")
 

From fab07e13f86b070c1012901b8cf5587ccb9701c3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Karol=20Bary=C5=82a?=
Date: Mon, 16 Oct 2023 21:47:13 +0200
Subject: [PATCH 254/551] Fix problems introduced while removing six

six.iterkeys() returns an iterator, but Python's dict.keys() returns a
view that is not an iterator, so before next() can be called on it, it
first needs to be passed through iter().
---
 cassandra/datastax/graph/graphson.py                    | 2 +-
 tests/integration/advanced/graph/fluent/__init__.py     | 4 ++--
 tests/integration/advanced/graph/fluent/test_graph.py   | 2 +-
 tests/integration/advanced/graph/test_graph_datatype.py | 4 ++--
 tests/integration/advanced/graph/test_graph_query.py    | 2 +-
 5 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/cassandra/datastax/graph/graphson.py b/cassandra/datastax/graph/graphson.py
index cf3bf9a2cd..335c7f7825 100644
--- a/cassandra/datastax/graph/graphson.py
+++ b/cassandra/datastax/graph/graphson.py
@@ -135,7 +135,7 @@ def serialize(cls, value, writer=None):
 
     @classmethod
     def get_specialized_serializer(cls, value):
-        if type(value) in int and (value > MAX_INT32 or value < MIN_INT32):
+        if type(value) is int and (value > MAX_INT32 or value < MIN_INT32):
             return Int64TypeIO
 
         return Int32TypeIO

diff --git a/tests/integration/advanced/graph/fluent/__init__.py b/tests/integration/advanced/graph/fluent/__init__.py
index bde726c297..155de026c5 100644
--- a/tests/integration/advanced/graph/fluent/__init__.py
+++ b/tests/integration/advanced/graph/fluent/__init__.py
@@ -459,7 +459,7 @@ def _write_and_read_data_types(self, schema, graphson, use_schema=True):
         for data in schema.fixtures.datatypes().values():
             typ, value, deserializer = data
             vertex_label = VertexLabel([typ])
-            property_name = next(vertex_label.non_pk_properties.keys())
+            property_name = next(iter(vertex_label.non_pk_properties.keys()))
             if use_schema or schema is CoreGraphSchema:
                 schema.create_vertex_label(self.session, vertex_label, execution_profile=ep)
 
@@ -537,7 +537,7 @@ def __test_udt(self, schema, graphson, address_class, address_with_tags_class,
         g = self.fetch_traversal_source(graphson)
         for typ, value in data.values():
             vertex_label = VertexLabel([typ])
-            property_name = next(vertex_label.non_pk_properties.keys())
+            property_name = next(iter(vertex_label.non_pk_properties.keys()))
             schema.create_vertex_label(self.session, vertex_label, execution_profile=ep)
 
             write_traversal = g.addV(str(vertex_label.label)).property('pkid', vertex_label.id). 
\ diff --git a/tests/integration/advanced/graph/fluent/test_graph.py b/tests/integration/advanced/graph/fluent/test_graph.py index 190292e6fe..911e6d5d57 100644 --- a/tests/integration/advanced/graph/fluent/test_graph.py +++ b/tests/integration/advanced/graph/fluent/test_graph.py @@ -121,7 +121,7 @@ def _send_batch_and_read_results(self, schema, graphson, add_all=False, use_sche for data in datatypes.values(): typ, value, deserializer = data vertex_label = VertexLabel([typ]) - property_name = next(vertex_label.non_pk_properties.keys()) + property_name = next(iter(vertex_label.non_pk_properties.keys())) values[property_name] = value if use_schema or schema is CoreGraphSchema: schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) diff --git a/tests/integration/advanced/graph/test_graph_datatype.py b/tests/integration/advanced/graph/test_graph_datatype.py index 1159527a32..8a261c94d9 100644 --- a/tests/integration/advanced/graph/test_graph_datatype.py +++ b/tests/integration/advanced/graph/test_graph_datatype.py @@ -87,7 +87,7 @@ def _test_all_datatypes(self, schema, graphson): for data in schema.fixtures.datatypes().values(): typ, value, deserializer = data vertex_label = VertexLabel([typ]) - property_name = next(vertex_label.non_pk_properties.keys()) + property_name = next(iter(vertex_label.non_pk_properties.keys())) schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) vertex = list(schema.add_vertex(self.session, vertex_label, property_name, value, execution_profile=ep))[0] @@ -168,7 +168,7 @@ def __test_udt(self, schema, graphson, address_class, address_with_tags_class, for typ, value in data.values(): vertex_label = VertexLabel([typ]) - property_name = next(vertex_label.non_pk_properties.keys()) + property_name = next(iter(vertex_label.non_pk_properties.keys())) schema.create_vertex_label(self.session, vertex_label, execution_profile=ep) vertex = list(schema.add_vertex(self.session, vertex_label, property_name, value, execution_profile=ep))[0] diff --git a/tests/integration/advanced/graph/test_graph_query.py b/tests/integration/advanced/graph/test_graph_query.py index fe65f616a3..0c889938d8 100644 --- a/tests/integration/advanced/graph/test_graph_query.py +++ b/tests/integration/advanced/graph/test_graph_query.py @@ -587,7 +587,7 @@ def _test_basic_query_with_type_wrapper(self, schema, graphson): vl = VertexLabel(['tupleOf(Int, Bigint)']) schema.create_vertex_label(self.session, vl, execution_profile=ep) - prop_name = next(vl.non_pk_properties.keys()) + prop_name = next(iter(vl.non_pk_properties.keys())) with self.assertRaises(InvalidRequest): schema.add_vertex(self.session, vl, prop_name, (1, 42), execution_profile=ep) From 43d9697fb41bd2d5fd0c833f5888ffea29f3b02f Mon Sep 17 00:00:00 2001 From: Dmitry Kropachev Date: Fri, 2 Aug 2024 08:11:13 -0400 Subject: [PATCH 255/551] Fix only formatting in policy and tablets related code --- cassandra/policies.py | 65 +++++++++++++++++++------------------------ cassandra/tablets.py | 33 ++++++++++++---------- 2 files changed, 47 insertions(+), 51 deletions(-) diff --git a/cassandra/policies.py b/cassandra/policies.py index 6912877454..a1495f3660 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -364,46 +364,39 @@ def distance(self, *args, **kwargs): return self._child_policy.distance(*args, **kwargs) def make_query_plan(self, working_keyspace=None, query=None): - if query and query.keyspace: - keyspace = query.keyspace - else: - keyspace = working_keyspace + keyspace = query.keyspace if 
query and query.keyspace else working_keyspace child = self._child_policy - if query is None: + if query is None or query.routing_key is None or keyspace is None: for host in child.make_query_plan(keyspace, query): yield host - else: - routing_key = query.routing_key - if routing_key is None or keyspace is None: - for host in child.make_query_plan(keyspace, query): - yield host - else: - replicas = [] - if self._tablets_routing_v1: - tablet = self._cluster_metadata._tablets.get_tablet_for_key(keyspace, query.table, self._cluster_metadata.token_map.token_class.from_key(routing_key)) - - if tablet is not None: - replicas_mapped = set(map(lambda r: r[0], tablet.replicas)) - child_plan = child.make_query_plan(keyspace, query) - - replicas = [host for host in child_plan if host.host_id in replicas_mapped] - - if replicas == []: - replicas = self._cluster_metadata.get_replicas(keyspace, routing_key) - - if self.shuffle_replicas: - shuffle(replicas) - for replica in replicas: - if replica.is_up and \ - child.distance(replica) == HostDistance.LOCAL: - yield replica - - for host in child.make_query_plan(keyspace, query): - # skip if we've already listed this host - if host not in replicas or \ - child.distance(host) == HostDistance.REMOTE: - yield host + return + + replicas = [] + if self._tablets_routing_v1: + tablet = self._cluster_metadata._tablets.get_tablet_for_key( + keyspace, query.table, self._cluster_metadata.token_map.token_class.from_key(query.routing_key)) + + if tablet is not None: + replicas_mapped = set(map(lambda r: r[0], tablet.replicas)) + child_plan = child.make_query_plan(keyspace, query) + + replicas = [host for host in child_plan if host.host_id in replicas_mapped] + + if not replicas: + replicas = self._cluster_metadata.get_replicas(keyspace, query.routing_key) + + if self.shuffle_replicas: + shuffle(replicas) + + for replica in replicas: + if replica.is_up and child.distance(replica) == HostDistance.LOCAL: + yield replica + + for host in child.make_query_plan(keyspace, query): + # skip if we've already listed this host + if host not in replicas or child.distance(host) == HostDistance.REMOTE: + yield host def on_up(self, *args, **kwargs): return self._child_policy.on_up(*args, **kwargs) diff --git a/cassandra/tablets.py b/cassandra/tablets.py index aeba7fa8ad..5e638d78c2 100644 --- a/cassandra/tablets.py +++ b/cassandra/tablets.py @@ -1,6 +1,7 @@ # Experimental, this interface and use may change from threading import Lock + class Tablet(object): """ Represents a single ScyllaDB tablet. 
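The tablet map consulted by the query plan above can also be exercised on its own. A minimal sketch, using hypothetical keyspace/table names and replica IDs, with a SimpleNamespace standing in for the token object (only a `.value` attribute is needed):

    from types import SimpleNamespace

    from cassandra.tablets import Tablet, Tablets

    tablets = Tablets({})
    # One tablet owning the token interval (-100, 0], replicated on a
    # single hypothetical (host_id, shard) pair.
    tablets.add_tablet("ks", "tbl", Tablet(-100, 0, [("host-a", 0)]))

    token = SimpleNamespace(value=-50)  # stand-in for a real token
    hit = tablets.get_tablet_for_key("ks", "tbl", token)
    assert hit is not None and hit.replicas == [("host-a", 0)]
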
@@ -11,7 +12,7 @@ class Tablet(object):
     last_token = 0
     replicas = None
 
-    def __init__(self, first_token = 0, last_token = 0, replicas = None):
+    def __init__(self, first_token=0, last_token=0, replicas=None):
         self.first_token = first_token
         self.last_token = last_token
         self.replicas = replicas
@@ -28,10 +29,11 @@ def _is_valid_tablet(replicas):
     @staticmethod
     def from_row(first_token, last_token, replicas):
         if Tablet._is_valid_tablet(replicas):
-            tablet = Tablet(first_token, last_token,replicas)
+            tablet = Tablet(first_token, last_token, replicas)
             return tablet
         return None
 
+
 # Experimental, this interface and use may change
 class Tablets(object):
     _lock = None
@@ -43,10 +45,10 @@ def __init__(self, tablets):
     def get_tablet_for_key(self, keyspace, table, t):
         tablet = self._tablets.get((keyspace, table), [])
 
-        if tablet == []:
+        if not tablet:
             return None
 
-        id = bisect_left(tablet, t.value, key = lambda tablet: tablet.last_token)
+        id = bisect_left(tablet, t.value, key=lambda tablet: tablet.last_token)
         if id < len(tablet) and t.value > tablet[id].first_token:
             return tablet[id]
         return None
@@ -55,13 +57,13 @@ def add_tablet(self, keyspace, table, tablet):
         with self._lock:
             tablets_for_table = self._tablets.setdefault((keyspace, table), [])
 
-            # find first overlaping range
-            start = bisect_left(tablets_for_table, tablet.first_token, key = lambda t: t.first_token)
+            # find first overlapping range
+            start = bisect_left(tablets_for_table, tablet.first_token, key=lambda t: t.first_token)
             if start > 0 and tablets_for_table[start - 1].last_token > tablet.first_token:
                 start = start - 1
 
-            # find last overlaping range
-            end = bisect_left(tablets_for_table, tablet.last_token, key = lambda t: t.last_token)
+            # find last overlapping range
+            end = bisect_left(tablets_for_table, tablet.last_token, key=lambda t: t.last_token)
             if end < len(tablets_for_table) and tablets_for_table[end].first_token >= tablet.last_token:
                 end = end - 1
 
@@ -70,6 +72,7 @@ def add_tablet(self, keyspace, table, tablet):
 
             tablets_for_table.insert(start, tablet)
 
+
 # bisect.bisect_left implementation from Python 3.11, needed until support for
 # Python < 3.10 is dropped; it is needed to use `key` to extract last_token from
 # the Tablet list - a better solution performance-wise than materializing a list of last_tokens
@@ -97,11 +100,11 @@ def bisect_left(a, x, lo=0, hi=None, *, key=None):
                 lo = mid + 1
             else:
                 hi = mid
-    else:
-        while lo < hi:
-            mid = (lo + hi) // 2
-            if key(a[mid]) < x:
-                lo = mid + 1
-            else:
-                hi = mid
+        return lo
+    while lo < hi:
+        mid = (lo + hi) // 2
+        if key(a[mid]) < x:
+            lo = mid + 1
+        else:
+            hi = mid
     return lo

From c62665f4f34d5452134f4429eaa88e9aa0bee548 Mon Sep 17 00:00:00 2001
From: Sylwia Szunejko
Date: Thu, 13 Jun 2024 09:28:16 +0200
Subject: [PATCH 256/551] Add RackAwareRoundRobinPolicy for host selection

---
 cassandra/cluster.py                          |   9 +-
 cassandra/metadata.py                         |   2 +-
 cassandra/policies.py                         | 152 +++++++++++++-
 docs/api/cassandra/policies.rst               |   3 +
 .../standard/test_rack_aware_policy.py        |  89 ++++++++
 tests/unit/test_policies.py                   | 198 +++++++++++-------
 6 files changed, 369 insertions(+), 84 deletions(-)
 create mode 100644 tests/integration/standard/test_rack_aware_policy.py

diff --git a/cassandra/cluster.py b/cassandra/cluster.py
index 71be215ab1..06e6293ef8 100644
--- a/cassandra/cluster.py
+++ b/cassandra/cluster.py
@@ -492,7 +492,8 @@ def _profiles_without_explicit_lbps(self):
 
     def distance(self, host):
         distances = set(p.load_balancing_policy.distance(host) for p in self.profiles.values())
-        return HostDistance.LOCAL if HostDistance.LOCAL in distances else \
HostDistance.LOCAL in distances else \ + return HostDistance.LOCAL_RACK if HostDistance.LOCAL_RACK in distances else \ + HostDistance.LOCAL if HostDistance.LOCAL in distances else \ HostDistance.REMOTE if HostDistance.REMOTE in distances else \ HostDistance.IGNORED @@ -609,7 +610,7 @@ class Cluster(object): Defaults to loopback interface. - Note: When using :class:`.DCAwareLoadBalancingPolicy` with no explicit + Note: When using :class:`.DCAwareRoundRobinPolicy` with no explicit local_dc set (as is the default), the DC is chosen from an arbitrary host in contact_points. In this case, contact_points should contain only nodes from a single, local DC. @@ -1369,21 +1370,25 @@ def __init__(self, self._user_types = defaultdict(dict) self._min_requests_per_connection = { + HostDistance.LOCAL_RACK: DEFAULT_MIN_REQUESTS, HostDistance.LOCAL: DEFAULT_MIN_REQUESTS, HostDistance.REMOTE: DEFAULT_MIN_REQUESTS } self._max_requests_per_connection = { + HostDistance.LOCAL_RACK: DEFAULT_MAX_REQUESTS, HostDistance.LOCAL: DEFAULT_MAX_REQUESTS, HostDistance.REMOTE: DEFAULT_MAX_REQUESTS } self._core_connections_per_host = { + HostDistance.LOCAL_RACK: DEFAULT_MIN_CONNECTIONS_PER_LOCAL_HOST, HostDistance.LOCAL: DEFAULT_MIN_CONNECTIONS_PER_LOCAL_HOST, HostDistance.REMOTE: DEFAULT_MIN_CONNECTIONS_PER_REMOTE_HOST } self._max_connections_per_host = { + HostDistance.LOCAL_RACK: DEFAULT_MAX_CONNECTIONS_PER_LOCAL_HOST, HostDistance.LOCAL: DEFAULT_MAX_CONNECTIONS_PER_LOCAL_HOST, HostDistance.REMOTE: DEFAULT_MAX_CONNECTIONS_PER_REMOTE_HOST } diff --git a/cassandra/metadata.py b/cassandra/metadata.py index d30e6a1925..edee822e40 100644 --- a/cassandra/metadata.py +++ b/cassandra/metadata.py @@ -3436,7 +3436,7 @@ def group_keys_by_replica(session, keyspace, table, keys): all_replicas = cluster.metadata.get_replicas(keyspace, routing_key) # First check if there are local replicas valid_replicas = [host for host in all_replicas if - host.is_up and distance(host) == HostDistance.LOCAL] + host.is_up and distance(host) in [HostDistance.LOCAL, HostDistance.LOCAL_RACK]] if not valid_replicas: valid_replicas = [host for host in all_replicas if host.is_up] diff --git a/cassandra/policies.py b/cassandra/policies.py index a1495f3660..d9d3da7980 100644 --- a/cassandra/policies.py +++ b/cassandra/policies.py @@ -46,7 +46,18 @@ class HostDistance(object): connections opened to it. """ - LOCAL = 0 + LOCAL_RACK = 0 + """ + Nodes with ``LOCAL_RACK`` distance will be preferred for operations + under some load balancing policies (such as :class:`.RackAwareRoundRobinPolicy`) + and will have a greater number of connections opened against + them by default. + + This distance is typically used for nodes within the same + datacenter and the same rack as the client. + """ + + LOCAL = 1 """ Nodes with ``LOCAL`` distance will be preferred for operations under some load balancing policies (such as :class:`.DCAwareRoundRobinPolicy`) @@ -57,12 +68,12 @@ class HostDistance(object): datacenter as the client. """ - REMOTE = 1 + REMOTE = 2 """ Nodes with ``REMOTE`` distance will be treated as a last resort - by some load balancing policies (such as :class:`.DCAwareRoundRobinPolicy`) - and will have a smaller number of connections opened against - them by default. + by some load balancing policies (such as :class:`.DCAwareRoundRobinPolicy` + and :class:`.RackAwareRoundRobinPolicy`)and will have a smaller number of + connections opened against them by default. This distance is typically used for nodes outside of the datacenter that the client is running in. 
@@ -102,6 +113,11 @@ class LoadBalancingPolicy(HostStateListener):
 
     You may also use subclasses of :class:`.LoadBalancingPolicy` for
     custom behavior.
+
+    You should always use immutable collections (e.g., tuples or
+    frozensets) to store information about hosts to prevent accidental
+    modification. When there are changes to the hosts (e.g., a host is
+    down or up), the old collection should be replaced with a new one.
     """
 
     _hosts_lock = None
@@ -316,6 +332,130 @@ def on_add(self, host):
     def on_remove(self, host):
         self.on_down(host)
 
+class RackAwareRoundRobinPolicy(LoadBalancingPolicy):
+    """
+    Similar to :class:`.DCAwareRoundRobinPolicy`, but prefers hosts
+    in the local rack, before hosts in the local datacenter but a
+    different rack, before hosts in all other datacenters.
+    """
+
+    local_dc = None
+    local_rack = None
+    used_hosts_per_remote_dc = 0
+
+    def __init__(self, local_dc, local_rack, used_hosts_per_remote_dc=0):
+        """
+        The `local_dc` and `local_rack` parameters should be the name of the
+        datacenter and rack (such as is reported by ``nodetool ring``) that
+        should be considered local.
+
+        `used_hosts_per_remote_dc` controls how many nodes in
+        each remote datacenter will have connections opened
+        against them. In other words, `used_hosts_per_remote_dc` hosts
+        will be considered :attr:`~.HostDistance.REMOTE` and the
+        rest will be considered :attr:`~.HostDistance.IGNORED`.
+        By default, all remote hosts are ignored.
+        """
+        self.local_rack = local_rack
+        self.local_dc = local_dc
+        self.used_hosts_per_remote_dc = used_hosts_per_remote_dc
+        self._live_hosts = {}
+        self._dc_live_hosts = {}
+        self._endpoints = []
+        self._position = 0
+        LoadBalancingPolicy.__init__(self)
+
+    def _rack(self, host):
+        return host.rack or self.local_rack
+
+    def _dc(self, host):
+        return host.datacenter or self.local_dc
+
+    def populate(self, cluster, hosts):
+        for (dc, rack), rack_hosts in groupby(hosts, lambda host: (self._dc(host), self._rack(host))):
+            self._live_hosts[(dc, rack)] = tuple(set(rack_hosts))
+        for dc, dc_hosts in groupby(hosts, lambda host: self._dc(host)):
+            self._dc_live_hosts[dc] = tuple(set(dc_hosts))
+
+        self._position = randint(0, len(hosts) - 1) if hosts else 0
+
+    def distance(self, host):
+        rack = self._rack(host)
+        dc = self._dc(host)
+        if rack == self.local_rack and dc == self.local_dc:
+            return HostDistance.LOCAL_RACK
+
+        if dc == self.local_dc:
+            return HostDistance.LOCAL
+
+        if not self.used_hosts_per_remote_dc:
+            return HostDistance.IGNORED
+
+        dc_hosts = self._dc_live_hosts.get(dc, ())
+        if not dc_hosts:
+            return HostDistance.IGNORED
+        if host in dc_hosts and dc_hosts.index(host) < self.used_hosts_per_remote_dc:
+            return HostDistance.REMOTE
+        else:
+            return HostDistance.IGNORED
+
+    def make_query_plan(self, working_keyspace=None, query=None):
+        pos = self._position
+        self._position += 1
+
+        local_rack_live = self._live_hosts.get((self.local_dc, self.local_rack), ())
+        pos = (pos % len(local_rack_live)) if local_rack_live else 0
+        # Slice the cyclic iterator to start from pos and include the next len(local_rack_live) elements
+        # This ensures we get exactly one full cycle starting from pos
+        for host in islice(cycle(local_rack_live), pos, pos + len(local_rack_live)):
+            yield host
+
+        local_live = [host for host in self._dc_live_hosts.get(self.local_dc, ()) if host.rack != self.local_rack]
+        pos = (pos % len(local_live)) if local_live else 0
+        for host in islice(cycle(local_live), pos, pos + len(local_live)):
+            yield host
+
+        # the dict can change, so get candidate DCs 
iterating over keys of a copy + for dc, remote_live in self._dc_live_hosts.copy().items(): + if dc != self.local_dc: + for host in remote_live[:self.used_hosts_per_remote_dc]: + yield host + + def on_up(self, host): + dc = self._dc(host) + rack = self._rack(host) + with self._hosts_lock: + current_rack_hosts = self._live_hosts.get((dc, rack), ()) + if host not in current_rack_hosts: + self._live_hosts[(dc, rack)] = current_rack_hosts + (host, ) + current_dc_hosts = self._dc_live_hosts.get(dc, ()) + if host not in current_dc_hosts: + self._dc_live_hosts[dc] = current_dc_hosts + (host, ) + + def on_down(self, host): + dc = self._dc(host) + rack = self._rack(host) + with self._hosts_lock: + current_rack_hosts = self._live_hosts.get((dc, rack), ()) + if host in current_rack_hosts: + hosts = tuple(h for h in current_rack_hosts if h != host) + if hosts: + self._live_hosts[(dc, rack)] = hosts + else: + del self._live_hosts[(dc, rack)] + current_dc_hosts = self._dc_live_hosts.get(dc, ()) + if host in current_dc_hosts: + hosts = tuple(h for h in current_dc_hosts if h != host) + if hosts: + self._dc_live_hosts[dc] = hosts + else: + del self._dc_live_hosts[dc] + + def on_add(self, host): + self.on_up(host) + + def on_remove(self, host): + self.on_down(host) class TokenAwarePolicy(LoadBalancingPolicy): """ @@ -390,7 +530,7 @@ def make_query_plan(self, working_keyspace=None, query=None): shuffle(replicas) for replica in replicas: - if replica.is_up and child.distance(replica) == HostDistance.LOCAL: + if replica.is_up and child.distance(replica) in [HostDistance.LOCAL, HostDistance.LOCAL_RACK]: yield replica for host in child.make_query_plan(keyspace, query): diff --git a/docs/api/cassandra/policies.rst b/docs/api/cassandra/policies.rst index 387b19ed95..ea3b19d796 100644 --- a/docs/api/cassandra/policies.rst +++ b/docs/api/cassandra/policies.rst @@ -18,6 +18,9 @@ Load Balancing .. autoclass:: DCAwareRoundRobinPolicy :members: +.. autoclass:: RackAwareRoundRobinPolicy + :members: + .. 
autoclass:: WhiteListRoundRobinPolicy :members: diff --git a/tests/integration/standard/test_rack_aware_policy.py b/tests/integration/standard/test_rack_aware_policy.py new file mode 100644 index 0000000000..5d7a69642f --- /dev/null +++ b/tests/integration/standard/test_rack_aware_policy.py @@ -0,0 +1,89 @@ +import logging +import unittest + +from cassandra.cluster import Cluster +from cassandra.policies import ConstantReconnectionPolicy, RackAwareRoundRobinPolicy + +from tests.integration import PROTOCOL_VERSION, get_cluster, use_multidc + +LOGGER = logging.getLogger(__name__) + +def setup_module(): + use_multidc({'DC1': {'RC1': 2, 'RC2': 2}, 'DC2': {'RC1': 3}}) + +class RackAwareRoundRobinPolicyTests(unittest.TestCase): + @classmethod + def setup_class(cls): + cls.cluster = Cluster(contact_points=[node.address() for node in get_cluster().nodelist()], protocol_version=PROTOCOL_VERSION, + load_balancing_policy=RackAwareRoundRobinPolicy("DC1", "RC1", used_hosts_per_remote_dc=0), + reconnection_policy=ConstantReconnectionPolicy(1)) + cls.session = cls.cluster.connect() + cls.create_ks_and_cf(cls) + cls.create_data(cls.session) + cls.node1, cls.node2, cls.node3, cls.node4, cls.node5, cls.node6, cls.node7 = get_cluster().nodes.values() + + @classmethod + def teardown_class(cls): + cls.cluster.shutdown() + + def create_ks_and_cf(self): + self.session.execute( + """ + DROP KEYSPACE IF EXISTS test1 + """ + ) + self.session.execute( + """ + CREATE KEYSPACE test1 + WITH replication = { + 'class': 'NetworkTopologyStrategy', + 'replication_factor': 3 + } + """) + + self.session.execute( + """ + CREATE TABLE test1.table1 (pk int, ck int, v int, PRIMARY KEY (pk, ck)); + """) + + @staticmethod + def create_data(session): + prepared = session.prepare( + """ + INSERT INTO test1.table1 (pk, ck, v) VALUES (?, ?, ?) + """) + + for i in range(50): + bound = prepared.bind((i, i%5, i%2)) + session.execute(bound) + + def test_rack_aware(self): + prepared = self.session.prepare( + """ + SELECT pk, ck, v FROM test1.table1 WHERE pk = ? 
+            """)
+
+        for i in range(10):
+            bound = prepared.bind([i])
+            results = self.session.execute(bound)
+            self.assertEqual(results, [(i, i%5, i%2)])
+            coordinator = str(results.response_future.coordinator_host.endpoint)
+            self.assertTrue(coordinator in set(["127.0.0.1:9042", "127.0.0.2:9042"]))
+
+        self.node2.stop(wait_other_notice=True, gently=True)
+
+        for i in range(10):
+            bound = prepared.bind([i])
+            results = self.session.execute(bound)
+            self.assertEqual(results, [(i, i%5, i%2)])
+            coordinator = str(results.response_future.coordinator_host.endpoint)
+            self.assertEqual(coordinator, "127.0.0.1:9042")
+
+        self.node1.stop(wait_other_notice=True, gently=True)
+
+        for i in range(10):
+            bound = prepared.bind([i])
+            results = self.session.execute(bound)
+            self.assertEqual(results, [(i, i%5, i%2)])
+            coordinator = str(results.response_future.coordinator_host.endpoint)
+            self.assertTrue(coordinator in set(["127.0.0.3:9042", "127.0.0.4:9042"]))
diff --git a/tests/unit/test_policies.py b/tests/unit/test_policies.py
index 877731dc08..15bd1ea95b 100644
--- a/tests/unit/test_policies.py
+++ b/tests/unit/test_policies.py
@@ -17,6 +17,7 @@
 from itertools import islice, cycle
 from mock import Mock, patch, call
 from random import randint
+import pytest
 from _thread import LockType
 import sys
 import struct
@@ -25,7 +26,7 @@
 from cassandra import ConsistencyLevel
 from cassandra.cluster import Cluster, ControlConnection
 from cassandra.metadata import Metadata
-from cassandra.policies import (RoundRobinPolicy, WhiteListRoundRobinPolicy, DCAwareRoundRobinPolicy,
+from cassandra.policies import (RackAwareRoundRobinPolicy, RoundRobinPolicy, WhiteListRoundRobinPolicy, DCAwareRoundRobinPolicy,
                                 TokenAwarePolicy, SimpleConvictionPolicy,
                                 HostDistance, ExponentialReconnectionPolicy,
                                 RetryPolicy, WriteType,
@@ -177,75 +178,107 @@ def test_no_live_nodes(self):
         qplan = list(policy.make_query_plan())
         self.assertEqual(qplan, [])
 
+@pytest.mark.parametrize("policy_specialization, constructor_args", [(DCAwareRoundRobinPolicy, ("dc1", )), (RackAwareRoundRobinPolicy, ("dc1", "rack1"))])
+class TestRackOrDCAwareRoundRobinPolicy:
 
-class DCAwareRoundRobinPolicyTest(unittest.TestCase):
-
-    def test_no_remote(self):
+    def test_no_remote(self, policy_specialization, constructor_args):
         hosts = []
-        for i in range(4):
+        for i in range(2):
             h = Host(DefaultEndPoint(i), SimpleConvictionPolicy)
+            h.set_location_info("dc1", "rack2")
+            hosts.append(h)
+        for i in range(2):
+            h = Host(DefaultEndPoint(i + 2), SimpleConvictionPolicy)
             h.set_location_info("dc1", "rack1")
             hosts.append(h)
 
-        policy = DCAwareRoundRobinPolicy("dc1")
+        policy = policy_specialization(*constructor_args)
         policy.populate(None, hosts)
 
         qplan = list(policy.make_query_plan())
-        self.assertEqual(sorted(qplan), sorted(hosts))
+        assert sorted(qplan) == sorted(hosts)
 
-    def test_with_remotes(self):
-        hosts = [Host(DefaultEndPoint(i), SimpleConvictionPolicy) for i in range(4)]
+    def test_with_remotes(self, policy_specialization, constructor_args):
+        hosts = [Host(DefaultEndPoint(i), SimpleConvictionPolicy) for i in range(6)]
         for h in hosts[:2]:
             h.set_location_info("dc1", "rack1")
-        for h in hosts[2:]:
+        for h in hosts[2:4]:
+            h.set_location_info("dc1", "rack2")
+        for h in hosts[4:]:
             h.set_location_info("dc2", "rack1")
 
-        local_hosts = set(h for h in hosts if h.datacenter == "dc1")
+        local_rack_hosts = set(h for h in hosts if h.datacenter == "dc1" and h.rack == "rack1")
+        local_hosts = set(h for h in hosts if h.datacenter == "dc1" and h.rack != "rack1")
         remote_hosts = set(h for h in hosts if h.datacenter != "dc1")
 
         # allow all of the remote hosts to be used
-        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=2)
+        policy = policy_specialization(*constructor_args, used_hosts_per_remote_dc=2)
         policy.populate(Mock(), hosts)
 
         qplan = list(policy.make_query_plan())
-        self.assertEqual(set(qplan[:2]), local_hosts)
-        self.assertEqual(set(qplan[2:]), remote_hosts)
+        if policy_specialization is DCAwareRoundRobinPolicy:
+            assert set(qplan[:4]) == local_rack_hosts | local_hosts
+        elif policy_specialization is RackAwareRoundRobinPolicy:
+            assert set(qplan[:2]) == local_rack_hosts
+            assert set(qplan[2:4]) == local_hosts
+        assert set(qplan[4:]) == remote_hosts
 
         # allow only one of the remote hosts to be used
-        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
+        policy = policy_specialization(*constructor_args, used_hosts_per_remote_dc=1)
         policy.populate(Mock(), hosts)
 
         qplan = list(policy.make_query_plan())
-        self.assertEqual(set(qplan[:2]), local_hosts)
+        if policy_specialization is DCAwareRoundRobinPolicy:
+            assert set(qplan[:4]) == local_rack_hosts | local_hosts
+        elif policy_specialization is RackAwareRoundRobinPolicy:
+            assert set(qplan[:2]) == local_rack_hosts
+            assert set(qplan[2:4]) == local_hosts
 
-        used_remotes = set(qplan[2:])
-        self.assertEqual(1, len(used_remotes))
-        self.assertIn(qplan[2], remote_hosts)
+        used_remotes = set(qplan[4:])
+        assert 1 == len(used_remotes)
+        assert qplan[4] in remote_hosts
 
         # allow no remote hosts to be used
-        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)
+        policy = policy_specialization(*constructor_args, used_hosts_per_remote_dc=0)
         policy.populate(Mock(), hosts)
 
         qplan = list(policy.make_query_plan())
-        self.assertEqual(2, len(qplan))
-        self.assertEqual(local_hosts, set(qplan))
 
-    def test_get_distance(self):
-        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)
+        assert 4 == len(qplan)
+        if policy_specialization is DCAwareRoundRobinPolicy:
+            assert set(qplan) == local_rack_hosts | local_hosts
+        elif policy_specialization is RackAwareRoundRobinPolicy:
+            assert set(qplan[:2]) == local_rack_hosts
+            assert set(qplan[2:4]) == local_hosts
+
+    def test_get_distance(self, policy_specialization, constructor_args):
+        policy = policy_specialization(*constructor_args, used_hosts_per_remote_dc=0)
+
+        # same dc, same rack
         host = Host(DefaultEndPoint("ip1"), SimpleConvictionPolicy)
         host.set_location_info("dc1", "rack1")
         policy.populate(Mock(), [host])
 
-        self.assertEqual(policy.distance(host), HostDistance.LOCAL)
+        if policy_specialization is DCAwareRoundRobinPolicy:
+            assert policy.distance(host) == HostDistance.LOCAL
+        elif policy_specialization is RackAwareRoundRobinPolicy:
+            assert policy.distance(host) == HostDistance.LOCAL_RACK
+
+        # same dc, different rack
+        host = Host(DefaultEndPoint("ip1"), SimpleConvictionPolicy)
+        host.set_location_info("dc1", "rack2")
+        policy.populate(Mock(), [host])
+
+        assert policy.distance(host) == HostDistance.LOCAL
 
         # used_hosts_per_remote_dc is set to 0, so ignore it
         remote_host = Host(DefaultEndPoint("ip2"), SimpleConvictionPolicy)
         remote_host.set_location_info("dc2", "rack1")
-        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
+        assert policy.distance(remote_host) == HostDistance.IGNORED
 
         # dc2 isn't registered in the policy's live_hosts dict
         policy.used_hosts_per_remote_dc = 1
-        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
+        assert policy.distance(remote_host) == HostDistance.IGNORED
 
         # make sure the policy has both dcs registered
         policy.populate(Mock(), [host, remote_host])
-        self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)
+        assert policy.distance(remote_host) == HostDistance.REMOTE
 
         # since used_hosts_per_remote_dc is set to 1, only the first
         # remote host in dc2 will be REMOTE, the rest are IGNORED
@@ -253,54 +286,58 @@ def test_get_distance(self):
         second_remote_host.set_location_info("dc2", "rack1")
         policy.populate(Mock(), [host, remote_host, second_remote_host])
         distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
-        self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))
+        assert distances == set([HostDistance.REMOTE, HostDistance.IGNORED])
 
-    def test_status_updates(self):
-        hosts = [Host(DefaultEndPoint(i), SimpleConvictionPolicy) for i in range(4)]
+    def test_status_updates(self, policy_specialization, constructor_args):
+        hosts = [Host(DefaultEndPoint(i), SimpleConvictionPolicy) for i in range(5)]
         for h in hosts[:2]:
             h.set_location_info("dc1", "rack1")
-        for h in hosts[2:]:
+        for h in hosts[2:4]:
+            h.set_location_info("dc1", "rack2")
+        for h in hosts[4:]:
             h.set_location_info("dc2", "rack1")
 
-        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
+        policy = policy_specialization(*constructor_args, used_hosts_per_remote_dc=1)
         policy.populate(Mock(), hosts)
         policy.on_down(hosts[0])
         policy.on_remove(hosts[2])
 
-        new_local_host = Host(DefaultEndPoint(4), SimpleConvictionPolicy)
+        new_local_host = Host(DefaultEndPoint(5), SimpleConvictionPolicy)
         new_local_host.set_location_info("dc1", "rack1")
         policy.on_up(new_local_host)
 
-        new_remote_host = Host(DefaultEndPoint(5), SimpleConvictionPolicy)
+        new_remote_host = Host(DefaultEndPoint(6), SimpleConvictionPolicy)
         new_remote_host.set_location_info("dc9000", "rack1")
         policy.on_add(new_remote_host)
 
-        # we now have two local hosts and two remote hosts in separate dcs
+        # we now have three local hosts and two remote hosts in separate dcs
         qplan = list(policy.make_query_plan())
-        self.assertEqual(set(qplan[:2]), set([hosts[1], new_local_host]))
-        self.assertEqual(set(qplan[2:]), set([hosts[3], new_remote_host]))
+
+        assert set(qplan[:3]) == set([hosts[1], new_local_host, hosts[3]])
+        assert set(qplan[3:]) == set([hosts[4], new_remote_host])
 
         # since we have hosts in dc9000, the distance shouldn't be IGNORED
-        self.assertEqual(policy.distance(new_remote_host), HostDistance.REMOTE)
+        assert policy.distance(new_remote_host) == HostDistance.REMOTE
 
         policy.on_down(new_local_host)
         policy.on_down(hosts[1])
         qplan = list(policy.make_query_plan())
-        self.assertEqual(set(qplan), set([hosts[3], new_remote_host]))
+        assert set(qplan) == set([hosts[3], hosts[4], new_remote_host])
 
         policy.on_down(new_remote_host)
         policy.on_down(hosts[3])
+        policy.on_down(hosts[4])
         qplan = list(policy.make_query_plan())
-        self.assertEqual(qplan, [])
+        assert qplan == []
 
-    def test_modification_during_generation(self):
+    def test_modification_during_generation(self, policy_specialization, constructor_args):
         hosts = [Host(DefaultEndPoint(i), SimpleConvictionPolicy) for i in range(4)]
         for h in hosts[:2]:
             h.set_location_info("dc1", "rack1")
         for h in hosts[2:]:
             h.set_location_info("dc2", "rack1")
 
-        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=3)
+        policy = policy_specialization(*constructor_args, used_hosts_per_remote_dc=3)
        policy.populate(Mock(), hosts)
 
        # The general concept here is to change the internal state of 
the @@ -315,20 +352,20 @@ def test_modification_during_generation(self): plan = policy.make_query_plan() policy.on_up(new_host) # local list is not bound yet, so we get to see that one - self.assertEqual(len(list(plan)), 3 + 2) + assert len(list(plan)) == 3 + 2 # remove local before iteration plan = policy.make_query_plan() policy.on_down(new_host) # local list is not bound yet, so we don't see it - self.assertEqual(len(list(plan)), 2 + 2) + assert len(list(plan)) == 2 + 2 # new local after starting iteration plan = policy.make_query_plan() next(plan) policy.on_up(new_host) # local list was is bound, and one consumed, so we only see the other original - self.assertEqual(len(list(plan)), 1 + 2) + assert len(list(plan)) == 1 + 2 # remove local after traversing available plan = policy.make_query_plan() @@ -336,7 +373,7 @@ def test_modification_during_generation(self): next(plan) policy.on_down(new_host) # we should be past the local list - self.assertEqual(len(list(plan)), 0 + 2) + assert len(list(plan)) == 0 + 2 # REMOTES CHANGE new_host.set_location_info("dc2", "rack1") @@ -347,7 +384,7 @@ def test_modification_during_generation(self): next(plan) policy.on_up(new_host) # list is updated before we get to it - self.assertEqual(len(list(plan)), 0 + 3) + assert len(list(plan)) == 0 + 3 # remove remote after traversing local, but not starting remote plan = policy.make_query_plan() @@ -355,7 +392,7 @@ def test_modification_during_generation(self): next(plan) policy.on_down(new_host) # list is updated before we get to it - self.assertEqual(len(list(plan)), 0 + 2) + assert len(list(plan)) == 0 + 2 # new remote after traversing local, and starting remote plan = policy.make_query_plan() @@ -363,7 +400,7 @@ def test_modification_during_generation(self): next(plan) policy.on_up(new_host) # slice is already made, and we've consumed one - self.assertEqual(len(list(plan)), 0 + 1) + assert len(list(plan)) == 0 + 1 # remove remote after traversing local, and starting remote plan = policy.make_query_plan() @@ -371,7 +408,7 @@ def test_modification_during_generation(self): next(plan) policy.on_down(new_host) # slice is created with all present, and we've consumed one - self.assertEqual(len(list(plan)), 0 + 2) + assert len(list(plan)) == 0 + 2 # local DC disappears after finishing it, but not starting remote plan = policy.make_query_plan() @@ -380,7 +417,7 @@ def test_modification_during_generation(self): policy.on_down(hosts[0]) policy.on_down(hosts[1]) # dict traversal starts as normal - self.assertEqual(len(list(plan)), 0 + 2) + assert len(list(plan)) == 0 + 2 policy.on_up(hosts[0]) policy.on_up(hosts[1]) @@ -393,7 +430,7 @@ def test_modification_during_generation(self): policy.on_down(hosts[0]) policy.on_down(hosts[1]) # dict traversal has begun and consumed one - self.assertEqual(len(list(plan)), 0 + 1) + assert len(list(plan)) == 0 + 1 policy.on_up(hosts[0]) policy.on_up(hosts[1]) @@ -404,7 +441,7 @@ def test_modification_during_generation(self): policy.on_down(hosts[2]) policy.on_down(hosts[3]) # nothing left - self.assertEqual(len(list(plan)), 0 + 0) + assert len(list(plan)) == 0 + 0 policy.on_up(hosts[2]) policy.on_up(hosts[3]) @@ -415,7 +452,7 @@ def test_modification_during_generation(self): policy.on_down(hosts[2]) policy.on_down(hosts[3]) # we continue with remainder of original list - self.assertEqual(len(list(plan)), 0 + 1) + assert len(list(plan)) == 0 + 1 policy.on_up(hosts[2]) policy.on_up(hosts[3]) @@ -430,7 +467,7 @@ def test_modification_during_generation(self): policy.on_up(new_host) 
policy.on_up(another_host)
 
         # we continue with remainder of original list
-        self.assertEqual(len(list(plan)), 0 + 1)
+        assert len(list(plan)) == 0 + 1
 
         # remote DC disappears after finishing it
         plan = policy.make_query_plan()
@@ -444,9 +481,9 @@ def test_modification_during_generation(self):
         for h in down_hosts:
             policy.on_down(h)
 
         # the last DC has two
-        self.assertEqual(len(list(plan)), 0 + 2)
+        assert len(list(plan)) == 0 + 2
 
-    def test_no_live_nodes(self):
+    def test_no_live_nodes(self, policy_specialization, constructor_args):
         """
         Ensure query plan for a downed cluster will execute without errors
         """
@@ -457,25 +494,37 @@ def test_no_live_nodes(self):
             h.set_location_info("dc1", "rack1")
             hosts.append(h)
 
-        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
+        policy = policy_specialization(*constructor_args, used_hosts_per_remote_dc=1)
         policy.populate(Mock(), hosts)
 
         for host in hosts:
             policy.on_down(host)
 
         qplan = list(policy.make_query_plan())
-        self.assertEqual(qplan, [])
+        assert qplan == []
 
-    def test_no_nodes(self):
+    def test_no_nodes(self, policy_specialization, constructor_args):
         """
         Ensure query plan for an empty cluster will execute without errors
         """
-        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
+        policy = policy_specialization(*constructor_args, used_hosts_per_remote_dc=1)
         policy.populate(None, [])
 
         qplan = list(policy.make_query_plan())
-        self.assertEqual(qplan, [])
+        assert qplan == []
+
+    def test_wrong_dc(self, policy_specialization, constructor_args):
+        hosts = [Host(DefaultEndPoint(i), SimpleConvictionPolicy) for i in range(3)]
+        for h in hosts[:3]:
+            h.set_location_info("dc2", "rack2")
+
+        policy = policy_specialization(*constructor_args, used_hosts_per_remote_dc=0)
+        policy.populate(Mock(), hosts)
+        qplan = list(policy.make_query_plan())
+        assert len(qplan) == 0
+
+
+class DCAwareRoundRobinPolicyTest(unittest.TestCase):
 
     def test_default_dc(self):
         host_local = Host(DefaultEndPoint(1), SimpleConvictionPolicy, 'local')
@@ -488,35 +537,34 @@ def test_default_dc(self):
         # contact DC first
         policy = DCAwareRoundRobinPolicy()
         policy.populate(cluster, [host_none])
-        self.assertFalse(policy.local_dc)
+        assert not policy.local_dc
         policy.on_add(host_local)
         policy.on_add(host_remote)
-        self.assertNotEqual(policy.local_dc, host_remote.datacenter)
-        self.assertEqual(policy.local_dc, host_local.datacenter)
+        assert policy.local_dc != host_remote.datacenter
+        assert policy.local_dc == host_local.datacenter
 
         # contact DC second
         policy = DCAwareRoundRobinPolicy()
         policy.populate(cluster, [host_none])
-        self.assertFalse(policy.local_dc)
+        assert not policy.local_dc
         policy.on_add(host_remote)
         policy.on_add(host_local)
-        self.assertNotEqual(policy.local_dc, host_remote.datacenter)
-        self.assertEqual(policy.local_dc, host_local.datacenter)
+        assert policy.local_dc != host_remote.datacenter
+        assert policy.local_dc == host_local.datacenter
 
         # no DC
         policy = DCAwareRoundRobinPolicy()
         policy.populate(cluster, [host_none])
-        self.assertFalse(policy.local_dc)
+        assert not policy.local_dc
         policy.on_add(host_none)
-        self.assertFalse(policy.local_dc)
+        assert not policy.local_dc
 
         # only other DC
         policy = DCAwareRoundRobinPolicy()
         policy.populate(cluster, [host_none])
-        self.assertFalse(policy.local_dc)
+        assert not policy.local_dc
         policy.on_add(host_remote)
-        self.assertFalse(policy.local_dc)
-
+        assert not policy.local_dc
 
 class TokenAwarePolicyTest(unittest.TestCase):
 
@@ -1274,7 +1322,7 @@ def test_hosts_with_hostname(self):
         self.assertEqual(sorted(qplan), [host])
self.assertEqual(policy.distance(host), HostDistance.LOCAL) - + def test_hosts_with_socket_hostname(self): hosts = [UnixSocketEndPoint('/tmp/scylla-workdir/cql.m')] policy = WhiteListRoundRobinPolicy(hosts) From 51d22708841c80c11c76c0b7f69bd35deba2fa3e Mon Sep 17 00:00:00 2001 From: Dmitry Kropachev Date: Mon, 5 Aug 2024 09:45:45 -0400 Subject: [PATCH 257/551] Fix driver name --- cassandra/connection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cassandra/connection.py b/cassandra/connection.py index 9fa2a991ec..ebdfe99993 100644 --- a/cassandra/connection.py +++ b/cassandra/connection.py @@ -109,7 +109,7 @@ def decompress(byts): return snappy.decompress(byts) locally_supported_compressions['snappy'] = (snappy.compress, decompress) -DRIVER_NAME, DRIVER_VERSION = 'Scylla Python Driver', sys.modules['cassandra'].__version__ +DRIVER_NAME, DRIVER_VERSION = 'ScyllaDB Python Driver', sys.modules['cassandra'].__version__ PROTOCOL_VERSION_MASK = 0x7f From 55371d881e231a114ecf896738b90c6eb9afcf48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Mon, 5 Aug 2024 16:47:56 +0200 Subject: [PATCH 258/551] Revert "ci: enable pytest run debug" This reverts commit cdd125adbc7b0af1a9e5a1deaa5fc3d03a2b03f4. --- ci/run_integration_test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/run_integration_test.sh b/ci/run_integration_test.sh index f7f1f8769e..2796a33e61 100755 --- a/ci/run_integration_test.sh +++ b/ci/run_integration_test.sh @@ -37,5 +37,5 @@ ccm remove # run test export MAPPED_SCYLLA_VERSION=3.11.4 -PROTOCOL_VERSION=4 pytest -vv -s --log-cli-level=debug -rf --import-mode append $* +PROTOCOL_VERSION=4 pytest -rf --import-mode append $* From 96edeb92eda4806a701ffe626d4d3435fac1eca5 Mon Sep 17 00:00:00 2001 From: Dmitry Kropachev Date: Mon, 5 Aug 2024 12:13:02 -0400 Subject: [PATCH 259/551] Start using 6.0.2 for tablets tests --- .github/workflows/integration-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 8c364e93a1..e2f2ece3d8 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -38,5 +38,5 @@ jobs: - name: Test tablets run: | export EVENT_LOOP_MANAGER=${{ matrix.event_loop_manager }} - export SCYLLA_VERSION='unstable/master:2024-01-17T17:56:00Z' + export SCYLLA_VERSION='release:6.0.2' ./ci/run_integration_test.sh tests/integration/experiments/ From 156dde7a2fdeff1aa1d836d3e1596eb543255b24 Mon Sep 17 00:00:00 2001 From: Dmitry Kropachev Date: Thu, 15 Aug 2024 13:43:36 -0400 Subject: [PATCH 260/551] Make test_compression_disabled expect proper value for scylla --- tests/integration/standard/test_metadata.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py index 86f48f88d5..f706e7c0bd 100644 --- a/tests/integration/standard/test_metadata.py +++ b/tests/integration/standard/test_metadata.py @@ -40,7 +40,7 @@ greaterthancass21, assert_startswith, greaterthanorequalcass40, greaterthanorequaldse67, lessthancass40, TestCluster, DSE_VERSION, requires_java_udf, requires_composite_type, - requires_collection_indexes, xfail_scylla) + requires_collection_indexes, SCYLLA_VERSION) from tests.util import wait_until @@ -531,14 +531,14 @@ def test_collection_indexes(self): tablemeta = self.get_table_metadata() self.assertIn('(full(b))', tablemeta.export_as_string()) - #TODO: 
Fix Scylla or test
-    @xfail_scylla('Scylla prints `compression = {}` instead of `compression = {\'enabled\': \'false\'}`.')
     def test_compression_disabled(self):
         create_statement = self.make_create_statement(["a"], ["b"], ["c"])
         create_statement += " WITH compression = {}"
         self.session.execute(create_statement)
         tablemeta = self.get_table_metadata()
-        expected = "compression = {}" if CASSANDRA_VERSION < Version("3.0") else "compression = {'enabled': 'false'}"
+        expected = "compression = {'enabled': 'false'}"
+        if SCYLLA_VERSION is not None or CASSANDRA_VERSION < Version("3.0"):
+            expected = "compression = {}"
         self.assertIn(expected, tablemeta.export_as_string())
 
     def test_non_size_tiered_compaction(self):

From 27b892b670793a155609003534fb4abd599de7da Mon Sep 17 00:00:00 2001
From: Dmitry Kropachev
Date: Thu, 15 Aug 2024 14:20:36 -0400
Subject: [PATCH 261/551] Add issue number to test_client_warnings

---
 tests/integration/standard/test_client_warnings.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/tests/integration/standard/test_client_warnings.py b/tests/integration/standard/test_client_warnings.py
index 194d0aa18f..ce5332a59f 100644
--- a/tests/integration/standard/test_client_warnings.py
+++ b/tests/integration/standard/test_client_warnings.py
@@ -24,10 +24,7 @@ def setup_module():
     use_singledc()
 
-
-# Failing with scylla because there is no warning message when changing the value of 'batch_size_warn_threshold_in_kb'
-# config")
-@xfail_scylla('Empty warnings: TypeError: object of type \'NoneType\' has no len()')
+@xfail_scylla('scylladb/scylladb#10196 - scylla does not report warnings')
 class ClientWarningTests(unittest.TestCase):
 
     @classmethod

From 156dde7a2fdeff1aa1d836d3e1596eb543255b24 Mon Sep 17 00:00:00 2001
From: Dmitry Kropachev
Date: Fri, 16 Aug 2024 09:30:37 -0400
Subject: [PATCH 262/551] Make MAPPED_SCYLLA_VERSION a soft requirement

This is going to ease the development and testing process. From now on,
if you want to run the tests against a release, you can simply run:

SCYLLA_VERSION="6.0.2" pytest ....
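In code terms, the change gives the mapped version a default instead of requiring it. A sketch of the resulting lookup, mirroring the tests/integration/__init__.py hunk below:

    import os

    SCYLLA_VERSION = os.getenv("SCYLLA_VERSION")
    if SCYLLA_VERSION:
        cv_string = SCYLLA_VERSION
        # Soft requirement: if no explicit mapping is given, assume Scylla
        # matches Cassandra 3.11.4 behavior.
        mcv_string = os.getenv("MAPPED_SCYLLA_VERSION", "3.11.4")
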
---
 ci/run_integration_test.sh    | 1 -
 tests/integration/__init__.py | 4 ++--
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/ci/run_integration_test.sh b/ci/run_integration_test.sh
index 2796a33e61..a625a8eca2 100755
--- a/ci/run_integration_test.sh
+++ b/ci/run_integration_test.sh
@@ -36,6 +36,5 @@ ccm remove
 
 # run test
-export MAPPED_SCYLLA_VERSION=3.11.4
 PROTOCOL_VERSION=4 pytest -rf --import-mode append $*
 
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
index 9928dfb7e2..dd359f0d27 100644
--- a/tests/integration/__init__.py
+++ b/tests/integration/__init__.py
@@ -185,10 +185,10 @@ def _get_dse_version_from_cass(cass_version):
     DSE_CRED = os.getenv('DSE_CREDS', None)
     CASSANDRA_VERSION = _get_cass_version_from_dse(DSE_VERSION.base_version)
     CCM_VERSION = DSE_VERSION.base_version
-else: # we are testing against Cassandra or DDAC
+else: # we are testing against Cassandra, DDAC, or Scylla
     if SCYLLA_VERSION:
         cv_string = SCYLLA_VERSION
-        mcv_string = os.getenv('MAPPED_SCYLLA_VERSION', None)
+        mcv_string = os.getenv('MAPPED_SCYLLA_VERSION', '3.11.4')  # Assume that Scylla matches Cassandra `3.11.4` behavior
     else:
         cv_string = os.getenv('CASSANDRA_VERSION', None)
         mcv_string = os.getenv('MAPPED_CASSANDRA_VERSION', None)

From bd3e9b967893f0a1c398a1daa9368a15801fa415 Mon Sep 17 00:00:00 2001
From: Dmitry Kropachev
Date: Fri, 27 Sep 2024 11:48:15 -0400
Subject: [PATCH 263/551] Update build library for Windows

OpenSSL 3.3.1 was removed from the hosting, so we need to update to
3.3.2 to keep CI/CD running.
---
 .github/workflows/build-push.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml
index 53be975be1..8a7ce9937a 100644
--- a/.github/workflows/build-push.yml
+++ b/.github/workflows/build-push.yml
@@ -61,7 +61,7 @@ jobs:
       - name: Install OpenSSL for Windows
         if: runner.os == 'Windows'
         run: |
-          choco install openssl --version=3.3.1 -f -y
+          choco install openssl --version=3.3.2 -f -y
 
       - name: Install Conan
         if: runner.os == 'Windows'

From ea2b70f50a3db939f80981468ae2f255e427729b Mon Sep 17 00:00:00 2001
From: Sylwia Szunejko
Date: Fri, 27 Sep 2024 11:07:51 +0200
Subject: [PATCH 264/551] Remove experimental options from tablets implementation

Since 6.0.0, tablets are no longer experimental, so there is no need to
state that they are. Also, to test tablets we use a ScyllaDB version
where there is no need to pass 'consistent-topology-changes' and
'tablets' in the 'experimental_features' configuration option.
---
 cassandra/query.py                            | 4 ++--
 cassandra/tablets.py                          | 2 --
 tests/integration/__init__.py                 | 7 ++-----
 tests/integration/experiments/test_tablets.py | 2 +-
 4 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/cassandra/query.py b/cassandra/query.py
index bd8ccd888d..42a10e2382 100644
--- a/cassandra/query.py
+++ b/cassandra/query.py
@@ -254,8 +254,8 @@ class Statement(object):
 
     table = None
     """
     The string name of the table this query acts on. This is used when the tablet
-    experimental feature is enabled and in the same time :class`~.TokenAwarePolicy`
-    is configured in the profile load balancing policy.
+    feature is enabled and, at the same time, :class:`~.TokenAwarePolicy` is configured
+    in the profile load balancing policy. 
""" custom_payload = None diff --git a/cassandra/tablets.py b/cassandra/tablets.py index 5e638d78c2..1e0c99fa47 100644 --- a/cassandra/tablets.py +++ b/cassandra/tablets.py @@ -1,4 +1,3 @@ -# Experimental, this interface and use may change from threading import Lock @@ -34,7 +33,6 @@ def from_row(first_token, last_token, replicas): return None -# Experimental, this interface and use may change class Tablets(object): _lock = None _tablets = {} diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index dd359f0d27..8c31bf85b6 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -506,7 +506,7 @@ def start_cluster_wait_for_up(cluster): def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, set_keyspace=True, ccm_options=None, - configuration_options=None, dse_options=None, use_single_interface=USE_SINGLE_INTERFACE, use_tablets=False): + configuration_options=None, dse_options=None, use_single_interface=USE_SINGLE_INTERFACE): configuration_options = configuration_options or {} dse_options = dse_options or {} workloads = workloads or [] @@ -616,10 +616,7 @@ def use_cluster(cluster_name, nodes, ipformat=None, start=True, workloads=None, # CDC is causing an issue (can't start cluster with multiple seeds) # Selecting only features we need for tests, i.e. anything but CDC. CCM_CLUSTER = CCMScyllaCluster(path, cluster_name, **ccm_options) - if use_tablets: - CCM_CLUSTER.set_configuration_options({'experimental_features': ['lwt', 'udf', 'consistent-topology-changes', 'tablets'], 'start_native_transport': True}) - else: - CCM_CLUSTER.set_configuration_options({'experimental_features': ['lwt', 'udf'], 'start_native_transport': True}) + CCM_CLUSTER.set_configuration_options({'experimental_features': ['lwt', 'udf'], 'start_native_transport': True}) CCM_CLUSTER.set_configuration_options({'skip_wait_for_gossip_to_settle': 0}) # Permit IS NOT NULL restriction on non-primary key columns of a materialized view diff --git a/tests/integration/experiments/test_tablets.py b/tests/integration/experiments/test_tablets.py index 5b146f6ebd..d37a8201c8 100644 --- a/tests/integration/experiments/test_tablets.py +++ b/tests/integration/experiments/test_tablets.py @@ -9,7 +9,7 @@ from tests.unit.test_host_connection_pool import LOGGER def setup_module(): - use_cluster('tablets', [3], start=True, use_tablets=True) + use_cluster('tablets', [3], start=True) class TestTabletsIntegration(unittest.TestCase): @classmethod From f2cc29ddc562cb017375cb8016f88af5712ac921 Mon Sep 17 00:00:00 2001 From: Sylwia Szunejko Date: Fri, 27 Sep 2024 11:11:35 +0200 Subject: [PATCH 265/551] Fix whitespaces and indentations --- cassandra/tablets.py | 6 +++--- tests/integration/experiments/test_tablets.py | 20 +++++++++---------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/cassandra/tablets.py b/cassandra/tablets.py index 1e0c99fa47..61394eace5 100644 --- a/cassandra/tablets.py +++ b/cassandra/tablets.py @@ -4,7 +4,7 @@ class Tablet(object): """ Represents a single ScyllaDB tablet. - It stores information about each replica, its host and shard, + It stores information about each replica, its host and shard, and the token interval in the format (first_token, last_token]. 
""" first_token = 0 @@ -40,12 +40,12 @@ class Tablets(object): def __init__(self, tablets): self._tablets = tablets self._lock = Lock() - + def get_tablet_for_key(self, keyspace, table, t): tablet = self._tablets.get((keyspace, table), []) if not tablet: return None - + id = bisect_left(tablet, t.value, key=lambda tablet: tablet.last_token) if id < len(tablet) and t.value > tablet[id].first_token: return tablet[id] diff --git a/tests/integration/experiments/test_tablets.py b/tests/integration/experiments/test_tablets.py index d37a8201c8..98e65c5383 100644 --- a/tests/integration/experiments/test_tablets.py +++ b/tests/integration/experiments/test_tablets.py @@ -20,7 +20,7 @@ def setup_class(cls): cls.session = cls.cluster.connect() cls.create_ks_and_cf(cls) cls.create_data(cls.session) - + @classmethod def teardown_class(cls): cls.cluster.shutdown() @@ -32,7 +32,7 @@ def verify_same_host_in_tracing(self, results): for event in events: LOGGER.info("TRACE EVENT: %s %s %s", event.source, event.thread_name, event.description) host_set.add(event.source) - + self.assertEqual(len(host_set), 1) self.assertIn('locally', "\n".join([event.description for event in events])) @@ -43,7 +43,7 @@ def verify_same_host_in_tracing(self, results): for event in events: LOGGER.info("TRACE EVENT: %s %s", event.source, event.activity) host_set.add(event.source) - + self.assertEqual(len(host_set), 1) self.assertIn('locally', "\n".join([event.activity for event in events])) @@ -54,7 +54,7 @@ def verify_same_shard_in_tracing(self, results): for event in events: LOGGER.info("TRACE EVENT: %s %s %s", event.source, event.thread_name, event.description) shard_set.add(event.thread_name) - + self.assertEqual(len(shard_set), 1) self.assertIn('locally', "\n".join([event.description for event in events])) @@ -65,10 +65,10 @@ def verify_same_shard_in_tracing(self, results): for event in events: LOGGER.info("TRACE EVENT: %s %s", event.thread, event.activity) shard_set.add(event.thread) - + self.assertEqual(len(shard_set), 1) self.assertIn('locally', "\n".join([event.activity for event in events])) - + def create_ks_and_cf(self): self.session.execute( """ @@ -79,8 +79,8 @@ def create_ks_and_cf(self): """ CREATE KEYSPACE test1 WITH replication = { - 'class': 'NetworkTopologyStrategy', - 'replication_factor': 1 + 'class': 'NetworkTopologyStrategy', + 'replication_factor': 1 } AND tablets = { 'initial': 8 } @@ -90,14 +90,14 @@ def create_ks_and_cf(self): """ CREATE TABLE test1.table1 (pk int, ck int, v int, PRIMARY KEY (pk, ck)); """) - + @staticmethod def create_data(session): prepared = session.prepare( """ INSERT INTO test1.table1 (pk, ck, v) VALUES (?, ?, ?) 
""") - + for i in range(50): bound = prepared.bind((i, i%5, i%2)) session.execute(bound) From f11216506c4f621177a16781ace0778fbac3d28b Mon Sep 17 00:00:00 2001 From: David Garcia Date: Sun, 29 Sep 2024 08:12:34 +0100 Subject: [PATCH 266/551] docs: update theme 1.8 --- .github/dependabot.yml | 11 + .gitignore | 1 - docs/Makefile | 1 - docs/poetry.lock | 1579 ++++++++++++++++++++++++++++++++++++++++ docs/pyproject.toml | 18 +- 5 files changed, 1599 insertions(+), 11 deletions(-) create mode 100644 .github/dependabot.yml create mode 100644 docs/poetry.lock diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..7811ce0305 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,11 @@ +version: 2 +updates: + - package-ecosystem: "pip" + directory: "/docs" + schedule: + interval: "daily" + ignore: + - dependency-name: "*" + allow: + - dependency-name: "sphinx-scylladb-theme" + - dependency-name: "sphinx-multiversion-scylla" diff --git a/.gitignore b/.gitignore index 4541d034f0..88e934235e 100644 --- a/.gitignore +++ b/.gitignore @@ -14,7 +14,6 @@ dist nosetests.xml cover/ docs/_build/ -docs/poetry.lock tests/integration/ccm setuptools*.tar.gz setuptools*.egg diff --git a/docs/Makefile b/docs/Makefile index d1c3a4c8ec..4ac5db5297 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -35,7 +35,6 @@ pristine: clean .PHONY: clean clean: rm -rf $(BUILDDIR)/* - rm -f poetry.lock # Generate output commands .PHONY: dirhtml diff --git a/docs/poetry.lock b/docs/poetry.lock new file mode 100644 index 0000000000..4bb20a14e5 --- /dev/null +++ b/docs/poetry.lock @@ -0,0 +1,1579 @@ +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. + +[[package]] +name = "aenum" +version = "2.2.6" +description = "Advanced Enumerations (compatible with Python's stdlib Enum), NamedTuples, and NamedConstants" +optional = false +python-versions = "*" +files = [ + {file = "aenum-2.2.6-py2-none-any.whl", hash = "sha256:aaebe735508d9cbc72cd6adfb59660a5e676dfbeb6fb24fb090041e7ddb8d3b3"}, + {file = "aenum-2.2.6-py3-none-any.whl", hash = "sha256:f9d20f7302ce3dc3639b3f75c3b3e146f3b22409a6b4513c1f0bd6dbdfcbd8c1"}, + {file = "aenum-2.2.6.tar.gz", hash = "sha256:260225470b49429f5893a195a8b99c73a8d182be42bf90c37c93e7b20e44eaae"}, +] + +[[package]] +name = "alabaster" +version = "0.7.16" +description = "A light, configurable Sphinx theme" +optional = false +python-versions = ">=3.9" +files = [ + {file = "alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92"}, + {file = "alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65"}, +] + +[[package]] +name = "anyio" +version = "4.6.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.9" +files = [ + {file = "anyio-4.6.0-py3-none-any.whl", hash = "sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a"}, + {file = "anyio-4.6.0.tar.gz", hash = "sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb"}, +] + +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" +typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} + +[package.extras] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", 
"exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"] +trio = ["trio (>=0.26.1)"] + +[[package]] +name = "babel" +version = "2.13.1" +description = "Internationalization utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "Babel-2.13.1-py3-none-any.whl", hash = "sha256:7077a4984b02b6727ac10f1f7294484f737443d7e2e66c5e4380e41a3ae0b4ed"}, + {file = "Babel-2.13.1.tar.gz", hash = "sha256:33e0952d7dd6374af8dbf6768cc4ddf3ccfefc244f9986d4074704f2fbd18900"}, +] + +[package.dependencies] +setuptools = {version = "*", markers = "python_version >= \"3.12\""} + +[package.extras] +dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] + +[[package]] +name = "beautifulsoup4" +version = "4.12.3" +description = "Screen-scraping library" +optional = false +python-versions = ">=3.6.0" +files = [ + {file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"}, + {file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"}, +] + +[package.dependencies] +soupsieve = ">1.2" + +[package.extras] +cchardet = ["cchardet"] +chardet = ["chardet"] +charset-normalizer = ["charset-normalizer"] +html5lib = ["html5lib"] +lxml = ["lxml"] + +[[package]] +name = "certifi" +version = "2023.7.22" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, + {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, +] + +[[package]] +name = "cffi" +version = "1.16.0" +description = "Foreign Function Interface for Python calling C code." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"}, + {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"}, + {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"}, + {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"}, + {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"}, + {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"}, + {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"}, + {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"}, + {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"}, + {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"}, + {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"}, + {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"}, + {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"}, + {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"}, + {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"}, + {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"}, + {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"}, + {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"}, + {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"}, + {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"}, + {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"}, + {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"}, + {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"}, + {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"}, + {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"}, + {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"}, + {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "charset-normalizer" +version = "3.3.1" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.1.tar.gz", hash = "sha256:d9137a876020661972ca6eec0766d81aef8a5627df628b664b234b73396e727e"}, + {file = "charset_normalizer-3.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8aee051c89e13565c6bd366813c386939f8e928af93c29fda4af86d25b73d8f8"}, + {file = "charset_normalizer-3.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:352a88c3df0d1fa886562384b86f9a9e27563d4704ee0e9d56ec6fcd270ea690"}, + {file = "charset_normalizer-3.3.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:223b4d54561c01048f657fa6ce41461d5ad8ff128b9678cfe8b2ecd951e3f8a2"}, + {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f861d94c2a450b974b86093c6c027888627b8082f1299dfd5a4bae8e2292821"}, + {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1171ef1fc5ab4693c5d151ae0fdad7f7349920eabbaca6271f95969fa0756c2d"}, + {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28f512b9a33235545fbbdac6a330a510b63be278a50071a336afc1b78781b147"}, + {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0e842112fe3f1a4ffcf64b06dc4c61a88441c2f02f373367f7b4c1aa9be2ad5"}, + {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f9bc2ce123637a60ebe819f9fccc614da1bcc05798bbbaf2dd4ec91f3e08846"}, + {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f194cce575e59ffe442c10a360182a986535fd90b57f7debfaa5c845c409ecc3"}, + {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9a74041ba0bfa9bc9b9bb2cd3238a6ab3b7618e759b41bd15b5f6ad958d17605"}, + {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b578cbe580e3b41ad17b1c428f382c814b32a6ce90f2d8e39e2e635d49e498d1"}, + {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:6db3cfb9b4fcecb4390db154e75b49578c87a3b9979b40cdf90d7e4b945656e1"}, + {file = 
"charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:debb633f3f7856f95ad957d9b9c781f8e2c6303ef21724ec94bea2ce2fcbd056"}, + {file = "charset_normalizer-3.3.1-cp310-cp310-win32.whl", hash = "sha256:87071618d3d8ec8b186d53cb6e66955ef2a0e4fa63ccd3709c0c90ac5a43520f"}, + {file = "charset_normalizer-3.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:e372d7dfd154009142631de2d316adad3cc1c36c32a38b16a4751ba78da2a397"}, + {file = "charset_normalizer-3.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ae4070f741f8d809075ef697877fd350ecf0b7c5837ed68738607ee0a2c572cf"}, + {file = "charset_normalizer-3.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:58e875eb7016fd014c0eea46c6fa92b87b62c0cb31b9feae25cbbe62c919f54d"}, + {file = "charset_normalizer-3.3.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dbd95e300367aa0827496fe75a1766d198d34385a58f97683fe6e07f89ca3e3c"}, + {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de0b4caa1c8a21394e8ce971997614a17648f94e1cd0640fbd6b4d14cab13a72"}, + {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:985c7965f62f6f32bf432e2681173db41336a9c2611693247069288bcb0c7f8b"}, + {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a15c1fe6d26e83fd2e5972425a772cca158eae58b05d4a25a4e474c221053e2d"}, + {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae55d592b02c4349525b6ed8f74c692509e5adffa842e582c0f861751701a673"}, + {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be4d9c2770044a59715eb57c1144dedea7c5d5ae80c68fb9959515037cde2008"}, + {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:851cf693fb3aaef71031237cd68699dded198657ec1e76a76eb8be58c03a5d1f"}, + {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:31bbaba7218904d2eabecf4feec0d07469284e952a27400f23b6628439439fa7"}, + {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:871d045d6ccc181fd863a3cd66ee8e395523ebfbc57f85f91f035f50cee8e3d4"}, + {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:501adc5eb6cd5f40a6f77fbd90e5ab915c8fd6e8c614af2db5561e16c600d6f3"}, + {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f5fb672c396d826ca16a022ac04c9dce74e00a1c344f6ad1a0fdc1ba1f332213"}, + {file = "charset_normalizer-3.3.1-cp311-cp311-win32.whl", hash = "sha256:bb06098d019766ca16fc915ecaa455c1f1cd594204e7f840cd6258237b5079a8"}, + {file = "charset_normalizer-3.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:8af5a8917b8af42295e86b64903156b4f110a30dca5f3b5aedea123fbd638bff"}, + {file = "charset_normalizer-3.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7ae8e5142dcc7a49168f4055255dbcced01dc1714a90a21f87448dc8d90617d1"}, + {file = "charset_normalizer-3.3.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5b70bab78accbc672f50e878a5b73ca692f45f5b5e25c8066d748c09405e6a55"}, + {file = "charset_normalizer-3.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5ceca5876032362ae73b83347be8b5dbd2d1faf3358deb38c9c88776779b2e2f"}, + {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:34d95638ff3613849f473afc33f65c401a89f3b9528d0d213c7037c398a51296"}, + {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9edbe6a5bf8b56a4a84533ba2b2f489d0046e755c29616ef8830f9e7d9cf5728"}, + {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6a02a3c7950cafaadcd46a226ad9e12fc9744652cc69f9e5534f98b47f3bbcf"}, + {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10b8dd31e10f32410751b3430996f9807fc4d1587ca69772e2aa940a82ab571a"}, + {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edc0202099ea1d82844316604e17d2b175044f9bcb6b398aab781eba957224bd"}, + {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b891a2f68e09c5ef989007fac11476ed33c5c9994449a4e2c3386529d703dc8b"}, + {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:71ef3b9be10070360f289aea4838c784f8b851be3ba58cf796262b57775c2f14"}, + {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:55602981b2dbf8184c098bc10287e8c245e351cd4fdcad050bd7199d5a8bf514"}, + {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:46fb9970aa5eeca547d7aa0de5d4b124a288b42eaefac677bde805013c95725c"}, + {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:520b7a142d2524f999447b3a0cf95115df81c4f33003c51a6ab637cbda9d0bf4"}, + {file = "charset_normalizer-3.3.1-cp312-cp312-win32.whl", hash = "sha256:8ec8ef42c6cd5856a7613dcd1eaf21e5573b2185263d87d27c8edcae33b62a61"}, + {file = "charset_normalizer-3.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:baec8148d6b8bd5cee1ae138ba658c71f5b03e0d69d5907703e3e1df96db5e41"}, + {file = "charset_normalizer-3.3.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:63a6f59e2d01310f754c270e4a257426fe5a591dc487f1983b3bbe793cf6bac6"}, + {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d6bfc32a68bc0933819cfdfe45f9abc3cae3877e1d90aac7259d57e6e0f85b1"}, + {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4f3100d86dcd03c03f7e9c3fdb23d92e32abbca07e7c13ebd7ddfbcb06f5991f"}, + {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39b70a6f88eebe239fa775190796d55a33cfb6d36b9ffdd37843f7c4c1b5dc67"}, + {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e12f8ee80aa35e746230a2af83e81bd6b52daa92a8afaef4fea4a2ce9b9f4fa"}, + {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b6cefa579e1237ce198619b76eaa148b71894fb0d6bcf9024460f9bf30fd228"}, + {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:61f1e3fb621f5420523abb71f5771a204b33c21d31e7d9d86881b2cffe92c47c"}, + {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4f6e2a839f83a6a76854d12dbebde50e4b1afa63e27761549d006fa53e9aa80e"}, + {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:1ec937546cad86d0dce5396748bf392bb7b62a9eeb8c66efac60e947697f0e58"}, + {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = 
"sha256:82ca51ff0fc5b641a2d4e1cc8c5ff108699b7a56d7f3ad6f6da9dbb6f0145b48"}, + {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:633968254f8d421e70f91c6ebe71ed0ab140220469cf87a9857e21c16687c034"}, + {file = "charset_normalizer-3.3.1-cp37-cp37m-win32.whl", hash = "sha256:c0c72d34e7de5604df0fde3644cc079feee5e55464967d10b24b1de268deceb9"}, + {file = "charset_normalizer-3.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:63accd11149c0f9a99e3bc095bbdb5a464862d77a7e309ad5938fbc8721235ae"}, + {file = "charset_normalizer-3.3.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5a3580a4fdc4ac05f9e53c57f965e3594b2f99796231380adb2baaab96e22761"}, + {file = "charset_normalizer-3.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2465aa50c9299d615d757c1c888bc6fef384b7c4aec81c05a0172b4400f98557"}, + {file = "charset_normalizer-3.3.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb7cd68814308aade9d0c93c5bd2ade9f9441666f8ba5aa9c2d4b389cb5e2a45"}, + {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91e43805ccafa0a91831f9cd5443aa34528c0c3f2cc48c4cb3d9a7721053874b"}, + {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:854cc74367180beb327ab9d00f964f6d91da06450b0855cbbb09187bcdb02de5"}, + {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c15070ebf11b8b7fd1bfff7217e9324963c82dbdf6182ff7050519e350e7ad9f"}, + {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c4c99f98fc3a1835af8179dcc9013f93594d0670e2fa80c83aa36346ee763d2"}, + {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3fb765362688821404ad6cf86772fc54993ec11577cd5a92ac44b4c2ba52155b"}, + {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dced27917823df984fe0c80a5c4ad75cf58df0fbfae890bc08004cd3888922a2"}, + {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a66bcdf19c1a523e41b8e9d53d0cedbfbac2e93c649a2e9502cb26c014d0980c"}, + {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ecd26be9f112c4f96718290c10f4caea6cc798459a3a76636b817a0ed7874e42"}, + {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:3f70fd716855cd3b855316b226a1ac8bdb3caf4f7ea96edcccc6f484217c9597"}, + {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:17a866d61259c7de1bdadef418a37755050ddb4b922df8b356503234fff7932c"}, + {file = "charset_normalizer-3.3.1-cp38-cp38-win32.whl", hash = "sha256:548eefad783ed787b38cb6f9a574bd8664468cc76d1538215d510a3cd41406cb"}, + {file = "charset_normalizer-3.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:45f053a0ece92c734d874861ffe6e3cc92150e32136dd59ab1fb070575189c97"}, + {file = "charset_normalizer-3.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bc791ec3fd0c4309a753f95bb6c749ef0d8ea3aea91f07ee1cf06b7b02118f2f"}, + {file = "charset_normalizer-3.3.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0c8c61fb505c7dad1d251c284e712d4e0372cef3b067f7ddf82a7fa82e1e9a93"}, + {file = "charset_normalizer-3.3.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2c092be3885a1b7899cd85ce24acedc1034199d6fca1483fa2c3a35c86e43041"}, + {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:c2000c54c395d9e5e44c99dc7c20a64dc371f777faf8bae4919ad3e99ce5253e"}, + {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4cb50a0335382aac15c31b61d8531bc9bb657cfd848b1d7158009472189f3d62"}, + {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c30187840d36d0ba2893bc3271a36a517a717f9fd383a98e2697ee890a37c273"}, + {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe81b35c33772e56f4b6cf62cf4aedc1762ef7162a31e6ac7fe5e40d0149eb67"}, + {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0bf89afcbcf4d1bb2652f6580e5e55a840fdf87384f6063c4a4f0c95e378656"}, + {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:06cf46bdff72f58645434d467bf5228080801298fbba19fe268a01b4534467f5"}, + {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:3c66df3f41abee950d6638adc7eac4730a306b022570f71dd0bd6ba53503ab57"}, + {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:cd805513198304026bd379d1d516afbf6c3c13f4382134a2c526b8b854da1c2e"}, + {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:9505dc359edb6a330efcd2be825fdb73ee3e628d9010597aa1aee5aa63442e97"}, + {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:31445f38053476a0c4e6d12b047b08ced81e2c7c712e5a1ad97bc913256f91b2"}, + {file = "charset_normalizer-3.3.1-cp39-cp39-win32.whl", hash = "sha256:bd28b31730f0e982ace8663d108e01199098432a30a4c410d06fe08fdb9e93f4"}, + {file = "charset_normalizer-3.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:555fe186da0068d3354cdf4bbcbc609b0ecae4d04c921cc13e209eece7720727"}, + {file = "charset_normalizer-3.3.1-py3-none-any.whl", hash = "sha256:800561453acdecedaac137bf09cd719c7a440b6800ec182f077bb8e7025fb708"}, +] + +[[package]] +name = "click" +version = "8.1.7" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "commonmark" +version = "0.9.1" +description = "Python parser for the CommonMark Markdown spec" +optional = false +python-versions = "*" +files = [ + {file = "commonmark-0.9.1-py2.py3-none-any.whl", hash = "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9"}, + {file = "commonmark-0.9.1.tar.gz", hash = "sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60"}, +] + +[package.extras] +test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"] + +[[package]] +name = "dnspython" +version = "2.4.2" +description = "DNS toolkit" +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "dnspython-2.4.2-py3-none-any.whl", hash = "sha256:57c6fbaaeaaf39c891292012060beb141791735dbb4004798328fc2c467402d8"}, + {file = "dnspython-2.4.2.tar.gz", hash = "sha256:8dcfae8c7460a2f84b4072e26f1c9f4101ca20c071649cb7c34e8b6a93d58984"}, +] + +[package.extras] +dnssec = ["cryptography (>=2.6,<42.0)"] +doh = ["h2 (>=4.1.0)", "httpcore (>=0.17.3)", "httpx (>=0.24.1)"] +doq = ["aioquic (>=0.9.20)"] +idna = ["idna (>=2.1,<4.0)"] +trio = ["trio (>=0.14,<0.23)"] +wmi = ["wmi (>=1.5.1,<2.0.0)"] + +[[package]] +name = "docutils" +version = "0.18.1" +description = "Docutils -- Python Documentation Utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "docutils-0.18.1-py2.py3-none-any.whl", hash = "sha256:23010f129180089fbcd3bc08cfefccb3b890b0050e1ca00c867036e9d161b98c"}, + {file = "docutils-0.18.1.tar.gz", hash = "sha256:679987caf361a7539d76e584cbeddc311e3aee937877c87346f31debc63e9d06"}, +] + +[[package]] +name = "eventlet" +version = "0.33.3" +description = "Highly concurrent networking library" +optional = false +python-versions = "*" +files = [ + {file = "eventlet-0.33.3-py2.py3-none-any.whl", hash = "sha256:e43b9ae05ba4bb477a10307699c9aff7ff86121b2640f9184d29059f5a687df8"}, + {file = "eventlet-0.33.3.tar.gz", hash = "sha256:722803e7eadff295347539da363d68ae155b8b26ae6a634474d0a920be73cfda"}, +] + +[package.dependencies] +dnspython = ">=1.15.0" +greenlet = ">=0.3" +six = ">=1.10.0" + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "futures" +version = "2.2.0" +description = "Backport of the concurrent.futures package from Python 3.2" +optional = false +python-versions = "*" +files = [ + {file = "futures-2.2.0-py2.py3-none-any.whl", hash = "sha256:9fd22b354a4c4755ad8c7d161d93f5026aca4cfe999bd2e53168f14765c02cd6"}, + {file = "futures-2.2.0.tar.gz", hash = "sha256:151c057173474a3a40f897165951c0e33ad04f37de65b6de547ddef107fd0ed3"}, +] + +[[package]] +name = "geomet" +version = "0.2.1.post1" +description = "GeoJSON <-> WKT/WKB conversion utilities" +optional = false +python-versions = ">2.6, 
!=3.3.*, <4" +files = [ + {file = "geomet-0.2.1.post1-py3-none-any.whl", hash = "sha256:a41a1e336b381416d6cbed7f1745c848e91defaa4d4c1bdc1312732e46ffad2b"}, + {file = "geomet-0.2.1.post1.tar.gz", hash = "sha256:91d754f7c298cbfcabd3befdb69c641c27fe75e808b27aa55028605761d17e95"}, +] + +[package.dependencies] +click = "*" +six = "*" + +[[package]] +name = "gevent" +version = "23.9.1" +description = "Coroutine-based network library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "gevent-23.9.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:a3c5e9b1f766a7a64833334a18539a362fb563f6c4682f9634dea72cbe24f771"}, + {file = "gevent-23.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b101086f109168b23fa3586fccd1133494bdb97f86920a24dc0b23984dc30b69"}, + {file = "gevent-23.9.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36a549d632c14684bcbbd3014a6ce2666c5f2a500f34d58d32df6c9ea38b6535"}, + {file = "gevent-23.9.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:272cffdf535978d59c38ed837916dfd2b5d193be1e9e5dcc60a5f4d5025dd98a"}, + {file = "gevent-23.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcb8612787a7f4626aa881ff15ff25439561a429f5b303048f0fca8a1c781c39"}, + {file = "gevent-23.9.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:d57737860bfc332b9b5aa438963986afe90f49645f6e053140cfa0fa1bdae1ae"}, + {file = "gevent-23.9.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5f3c781c84794926d853d6fb58554dc0dcc800ba25c41d42f6959c344b4db5a6"}, + {file = "gevent-23.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:dbb22a9bbd6a13e925815ce70b940d1578dbe5d4013f20d23e8a11eddf8d14a7"}, + {file = "gevent-23.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:707904027d7130ff3e59ea387dddceedb133cc742b00b3ffe696d567147a9c9e"}, + {file = "gevent-23.9.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:45792c45d60f6ce3d19651d7fde0bc13e01b56bb4db60d3f32ab7d9ec467374c"}, + {file = "gevent-23.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e24c2af9638d6c989caffc691a039d7c7022a31c0363da367c0d32ceb4a0648"}, + {file = "gevent-23.9.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e1ead6863e596a8cc2a03e26a7a0981f84b6b3e956101135ff6d02df4d9a6b07"}, + {file = "gevent-23.9.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65883ac026731ac112184680d1f0f1e39fa6f4389fd1fc0bf46cc1388e2599f9"}, + {file = "gevent-23.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf7af500da05363e66f122896012acb6e101a552682f2352b618e541c941a011"}, + {file = "gevent-23.9.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:c3e5d2fa532e4d3450595244de8ccf51f5721a05088813c1abd93ad274fe15e7"}, + {file = "gevent-23.9.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c84d34256c243b0a53d4335ef0bc76c735873986d478c53073861a92566a8d71"}, + {file = "gevent-23.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ada07076b380918829250201df1d016bdafb3acf352f35e5693b59dceee8dd2e"}, + {file = "gevent-23.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:921dda1c0b84e3d3b1778efa362d61ed29e2b215b90f81d498eb4d8eafcd0b7a"}, + {file = "gevent-23.9.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:ed7a048d3e526a5c1d55c44cb3bc06cfdc1947d06d45006cc4cf60dedc628904"}, + {file = "gevent-23.9.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:7c1abc6f25f475adc33e5fc2dbcc26a732608ac5375d0d306228738a9ae14d3b"}, + {file = "gevent-23.9.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4368f341a5f51611411ec3fc62426f52ac3d6d42eaee9ed0f9eebe715c80184e"}, + {file = "gevent-23.9.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:52b4abf28e837f1865a9bdeef58ff6afd07d1d888b70b6804557e7908032e599"}, + {file = "gevent-23.9.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52e9f12cd1cda96603ce6b113d934f1aafb873e2c13182cf8e86d2c5c41982ea"}, + {file = "gevent-23.9.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:de350fde10efa87ea60d742901e1053eb2127ebd8b59a7d3b90597eb4e586599"}, + {file = "gevent-23.9.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:fde6402c5432b835fbb7698f1c7f2809c8d6b2bd9d047ac1f5a7c1d5aa569303"}, + {file = "gevent-23.9.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:dd6c32ab977ecf7c7b8c2611ed95fa4aaebd69b74bf08f4b4960ad516861517d"}, + {file = "gevent-23.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:455e5ee8103f722b503fa45dedb04f3ffdec978c1524647f8ba72b4f08490af1"}, + {file = "gevent-23.9.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:7ccf0fd378257cb77d91c116e15c99e533374a8153632c48a3ecae7f7f4f09fe"}, + {file = "gevent-23.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d163d59f1be5a4c4efcdd13c2177baaf24aadf721fdf2e1af9ee54a998d160f5"}, + {file = "gevent-23.9.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7532c17bc6c1cbac265e751b95000961715adef35a25d2b0b1813aa7263fb397"}, + {file = "gevent-23.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:78eebaf5e73ff91d34df48f4e35581ab4c84e22dd5338ef32714264063c57507"}, + {file = "gevent-23.9.1-cp38-cp38-win32.whl", hash = "sha256:f632487c87866094546a74eefbca2c74c1d03638b715b6feb12e80120960185a"}, + {file = "gevent-23.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:62d121344f7465e3739989ad6b91f53a6ca9110518231553fe5846dbe1b4518f"}, + {file = "gevent-23.9.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:bf456bd6b992eb0e1e869e2fd0caf817f0253e55ca7977fd0e72d0336a8c1c6a"}, + {file = "gevent-23.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43daf68496c03a35287b8b617f9f91e0e7c0d042aebcc060cadc3f049aadd653"}, + {file = "gevent-23.9.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:7c28e38dcde327c217fdafb9d5d17d3e772f636f35df15ffae2d933a5587addd"}, + {file = "gevent-23.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:fae8d5b5b8fa2a8f63b39f5447168b02db10c888a3e387ed7af2bd1b8612e543"}, + {file = "gevent-23.9.1-cp39-cp39-win32.whl", hash = "sha256:2c7b5c9912378e5f5ccf180d1fdb1e83f42b71823483066eddbe10ef1a2fcaa2"}, + {file = "gevent-23.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:a2898b7048771917d85a1d548fd378e8a7b2ca963db8e17c6d90c76b495e0e2b"}, + {file = "gevent-23.9.1.tar.gz", hash = "sha256:72c002235390d46f94938a96920d8856d4ffd9ddf62a303a0d7c118894097e34"}, +] + +[package.dependencies] +cffi = {version = ">=1.12.2", markers = "platform_python_implementation == \"CPython\" and sys_platform == \"win32\""} +greenlet = [ + {version = ">=2.0.0", markers = "platform_python_implementation == \"CPython\" and python_version < \"3.11\""}, + {version = ">=3.0rc3", markers = "platform_python_implementation == \"CPython\" and python_version >= \"3.11\""}, +] +"zope.event" = "*" +"zope.interface" = "*" + +[package.extras] +dnspython = ["dnspython (>=1.16.0,<2.0)", "idna"] +docs = ["furo", 
"repoze.sphinx.autointerface", "sphinx", "sphinxcontrib-programoutput", "zope.schema"] +monitor = ["psutil (>=5.7.0)"] +recommended = ["cffi (>=1.12.2)", "dnspython (>=1.16.0,<2.0)", "idna", "psutil (>=5.7.0)"] +test = ["cffi (>=1.12.2)", "coverage (>=5.0)", "dnspython (>=1.16.0,<2.0)", "idna", "objgraph", "psutil (>=5.7.0)", "requests", "setuptools"] + +[[package]] +name = "greenlet" +version = "3.0.1" +description = "Lightweight in-process concurrent programming" +optional = false +python-versions = ">=3.7" +files = [ + {file = "greenlet-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f89e21afe925fcfa655965ca8ea10f24773a1791400989ff32f467badfe4a064"}, + {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28e89e232c7593d33cac35425b58950789962011cc274aa43ef8865f2e11f46d"}, + {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8ba29306c5de7717b5761b9ea74f9c72b9e2b834e24aa984da99cbfc70157fd"}, + {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19bbdf1cce0346ef7341705d71e2ecf6f41a35c311137f29b8a2dc2341374565"}, + {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:599daf06ea59bfedbec564b1692b0166a0045f32b6f0933b0dd4df59a854caf2"}, + {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b641161c302efbb860ae6b081f406839a8b7d5573f20a455539823802c655f63"}, + {file = "greenlet-3.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d57e20ba591727da0c230ab2c3f200ac9d6d333860d85348816e1dca4cc4792e"}, + {file = "greenlet-3.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5805e71e5b570d490938d55552f5a9e10f477c19400c38bf1d5190d760691846"}, + {file = "greenlet-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:52e93b28db27ae7d208748f45d2db8a7b6a380e0d703f099c949d0f0d80b70e9"}, + {file = "greenlet-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f7bfb769f7efa0eefcd039dd19d843a4fbfbac52f1878b1da2ed5793ec9b1a65"}, + {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91e6c7db42638dc45cf2e13c73be16bf83179f7859b07cfc139518941320be96"}, + {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1757936efea16e3f03db20efd0cd50a1c86b06734f9f7338a90c4ba85ec2ad5a"}, + {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19075157a10055759066854a973b3d1325d964d498a805bb68a1f9af4aaef8ec"}, + {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9d21aaa84557d64209af04ff48e0ad5e28c5cca67ce43444e939579d085da72"}, + {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2847e5d7beedb8d614186962c3d774d40d3374d580d2cbdab7f184580a39d234"}, + {file = "greenlet-3.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:97e7ac860d64e2dcba5c5944cfc8fa9ea185cd84061c623536154d5a89237884"}, + {file = "greenlet-3.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b2c02d2ad98116e914d4f3155ffc905fd0c025d901ead3f6ed07385e19122c94"}, + {file = "greenlet-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:22f79120a24aeeae2b4471c711dcf4f8c736a2bb2fabad2a67ac9a55ea72523c"}, + {file = "greenlet-3.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:100f78a29707ca1525ea47388cec8a049405147719f47ebf3895e7509c6446aa"}, + 
{file = "greenlet-3.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60d5772e8195f4e9ebf74046a9121bbb90090f6550f81d8956a05387ba139353"}, + {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:daa7197b43c707462f06d2c693ffdbb5991cbb8b80b5b984007de431493a319c"}, + {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea6b8aa9e08eea388c5f7a276fabb1d4b6b9d6e4ceb12cc477c3d352001768a9"}, + {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d11ebbd679e927593978aa44c10fc2092bc454b7d13fdc958d3e9d508aba7d0"}, + {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dbd4c177afb8a8d9ba348d925b0b67246147af806f0b104af4d24f144d461cd5"}, + {file = "greenlet-3.0.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20107edf7c2c3644c67c12205dc60b1bb11d26b2610b276f97d666110d1b511d"}, + {file = "greenlet-3.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8bef097455dea90ffe855286926ae02d8faa335ed8e4067326257cb571fc1445"}, + {file = "greenlet-3.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:b2d3337dcfaa99698aa2377c81c9ca72fcd89c07e7eb62ece3f23a3fe89b2ce4"}, + {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80ac992f25d10aaebe1ee15df45ca0d7571d0f70b645c08ec68733fb7a020206"}, + {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:337322096d92808f76ad26061a8f5fccb22b0809bea39212cd6c406f6a7060d2"}, + {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9934adbd0f6e476f0ecff3c94626529f344f57b38c9a541f87098710b18af0a"}, + {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc4d815b794fd8868c4d67602692c21bf5293a75e4b607bb92a11e821e2b859a"}, + {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41bdeeb552d814bcd7fb52172b304898a35818107cc8778b5101423c9017b3de"}, + {file = "greenlet-3.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:6e6061bf1e9565c29002e3c601cf68569c450be7fc3f7336671af7ddb4657166"}, + {file = "greenlet-3.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:fa24255ae3c0ab67e613556375a4341af04a084bd58764731972bcbc8baeba36"}, + {file = "greenlet-3.0.1-cp37-cp37m-win32.whl", hash = "sha256:b489c36d1327868d207002391f662a1d163bdc8daf10ab2e5f6e41b9b96de3b1"}, + {file = "greenlet-3.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:f33f3258aae89da191c6ebaa3bc517c6c4cbc9b9f689e5d8452f7aedbb913fa8"}, + {file = "greenlet-3.0.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:d2905ce1df400360463c772b55d8e2518d0e488a87cdea13dd2c71dcb2a1fa16"}, + {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a02d259510b3630f330c86557331a3b0e0c79dac3d166e449a39363beaae174"}, + {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55d62807f1c5a1682075c62436702aaba941daa316e9161e4b6ccebbbf38bda3"}, + {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3fcc780ae8edbb1d050d920ab44790201f027d59fdbd21362340a85c79066a74"}, + {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4eddd98afc726f8aee1948858aed9e6feeb1758889dfd869072d4465973f6bfd"}, + {file = 
"greenlet-3.0.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:eabe7090db68c981fca689299c2d116400b553f4b713266b130cfc9e2aa9c5a9"}, + {file = "greenlet-3.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f2f6d303f3dee132b322a14cd8765287b8f86cdc10d2cb6a6fae234ea488888e"}, + {file = "greenlet-3.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d923ff276f1c1f9680d32832f8d6c040fe9306cbfb5d161b0911e9634be9ef0a"}, + {file = "greenlet-3.0.1-cp38-cp38-win32.whl", hash = "sha256:0b6f9f8ca7093fd4433472fd99b5650f8a26dcd8ba410e14094c1e44cd3ceddd"}, + {file = "greenlet-3.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:990066bff27c4fcf3b69382b86f4c99b3652bab2a7e685d968cd4d0cfc6f67c6"}, + {file = "greenlet-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ce85c43ae54845272f6f9cd8320d034d7a946e9773c693b27d620edec825e376"}, + {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89ee2e967bd7ff85d84a2de09df10e021c9b38c7d91dead95b406ed6350c6997"}, + {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87c8ceb0cf8a5a51b8008b643844b7f4a8264a2c13fcbcd8a8316161725383fe"}, + {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d6a8c9d4f8692917a3dc7eb25a6fb337bff86909febe2f793ec1928cd97bedfc"}, + {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fbc5b8f3dfe24784cee8ce0be3da2d8a79e46a276593db6868382d9c50d97b1"}, + {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85d2b77e7c9382f004b41d9c72c85537fac834fb141b0296942d52bf03fe4a3d"}, + {file = "greenlet-3.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:696d8e7d82398e810f2b3622b24e87906763b6ebfd90e361e88eb85b0e554dc8"}, + {file = "greenlet-3.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:329c5a2e5a0ee942f2992c5e3ff40be03e75f745f48847f118a3cfece7a28546"}, + {file = "greenlet-3.0.1-cp39-cp39-win32.whl", hash = "sha256:cf868e08690cb89360eebc73ba4be7fb461cfbc6168dd88e2fbbe6f31812cd57"}, + {file = "greenlet-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:ac4a39d1abae48184d420aa8e5e63efd1b75c8444dd95daa3e03f6c6310e9619"}, + {file = "greenlet-3.0.1.tar.gz", hash = "sha256:816bd9488a94cba78d93e1abb58000e8266fa9cc2aa9ccdd6eb0696acb24005b"}, +] + +[package.extras] +docs = ["Sphinx"] +test = ["objgraph", "psutil"] + +[[package]] +name = "gremlinpython" +version = "3.4.7" +description = "Gremlin-Python for Apache TinkerPop" +optional = false +python-versions = "*" +files = [ + {file = "gremlinpython-3.4.7-py2.py3-none-any.whl", hash = "sha256:3fc60881638d370fdd0acc005a536baf2fdb3539d5150f2c787e460382548ac4"}, + {file = "gremlinpython-3.4.7.tar.gz", hash = "sha256:0ebe51bba36606d7d731bdeb4f8558ea7f88abf15f841693da47b994a29ac424"}, +] + +[package.dependencies] +aenum = ">=1.4.5,<3.0.0" +isodate = ">=0.6.0,<1.0.0" +six = ">=1.10.0,<2.0.0" +tornado = ">=4.4.1,<6.0" + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "idna" +version = "3.4" +description = "Internationalized Domain Names in Applications 
(IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] + +[[package]] +name = "imagesize" +version = "1.4.1" +description = "Getting image size from png/jpeg/jpeg2000/gif file" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"}, + {file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"}, +] + +[[package]] +name = "isodate" +version = "0.6.1" +description = "An ISO 8601 date/time/duration parser and formatter" +optional = false +python-versions = "*" +files = [ + {file = "isodate-0.6.1-py2.py3-none-any.whl", hash = "sha256:0751eece944162659049d35f4f549ed815792b38793f07cf73381c1c87cbed96"}, + {file = "isodate-0.6.1.tar.gz", hash = "sha256:48c5881de7e8b0a0d648cb024c8062dc84e7b840ed81e864c7614fd3c127bde9"}, +] + +[package.dependencies] +six = "*" + +[[package]] +name = "jinja2" +version = "3.1.2" +description = "A very fast and expressive template engine." +optional = false +python-versions = ">=3.7" +files = [ + {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, + {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "markupsafe" +version = "2.1.3" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, + {file = 
"MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, + {file = 
"MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, + {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, +] + +[[package]] +name = "packaging" +version = "23.2" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, + {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, +] + +[[package]] +name = "pycparser" +version = "2.21" +description = "C parser in Python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] + +[[package]] +name = "pygments" +version = "2.18.0" +description = "Pygments is a syntax highlighting package written in Python." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, + {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, +] + +[package.extras] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pyyaml" +version = "6.0.1" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = 
"PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +] + +[[package]] +name = "recommonmark" +version = "0.7.1" +description = "A docutils-compatibility bridge to CommonMark, enabling you to write CommonMark inside of Docutils & Sphinx projects." +optional = false +python-versions = "*" +files = [ + {file = "recommonmark-0.7.1-py2.py3-none-any.whl", hash = "sha256:1b1db69af0231efce3fa21b94ff627ea33dee7079a01dd0a7f8482c3da148b3f"}, + {file = "recommonmark-0.7.1.tar.gz", hash = "sha256:bdb4db649f2222dcd8d2d844f0006b958d627f732415d399791ee436a3686d67"}, +] + +[package.dependencies] +commonmark = ">=0.8.1" +docutils = ">=0.11" +sphinx = ">=1.3.1" + +[[package]] +name = "redirects-cli" +version = "0.1.3" +description = "Generates static redirections from a YAML file." +optional = false +python-versions = ">=3.7" +files = [ + {file = "redirects_cli-0.1.3-py3-none-any.whl", hash = "sha256:8a7a548d5f45b98db7d110fd8affbbb44b966cf250e35b5f4c9bd6541622272d"}, + {file = "redirects_cli-0.1.3.tar.gz", hash = "sha256:0cc6f35ae372d087d56bc03cfc639d6e2eac0771454c3c173ac6f3dc233969bc"}, +] + +[package.dependencies] +colorama = ">=0.4" +typer = ">=0.3" + +[package.extras] +test = ["pre-commit", "pytest"] + +[[package]] +name = "requests" +version = "2.31.0" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "scales" +version = "1.0.9" +description = "Stats for Python processes" +optional = false +python-versions = "*" +files = [ + {file = "scales-1.0.9.tar.gz", hash = "sha256:8b6930f7d4bf115192290b44c757af5e254e3fcfcb75ff9a51f5c96a404e2753"}, +] + +[package.dependencies] +six = "*" + +[[package]] +name = "setuptools" +version = "74.1.3" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "setuptools-74.1.3-py3-none-any.whl", hash = "sha256:1cfd66bfcf197bce344da024c8f5b35acc4dcb7ca5202246a75296b4883f6851"}, + {file = "setuptools-74.1.3.tar.gz", hash = "sha256:fbb126f14b0b9ffa54c4574a50ae60673bbe8ae0b1645889d10b3b14f5891d28"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"] +core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.text (>=3.7)", "more-itertools (>=8.8)", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.11.*)", "pytest-mypy"] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "snowballstemmer" +version = "2.2.0" +description = "This package provides 29 stemmers for 28 languages generated from Snowball 
algorithms." +optional = false +python-versions = "*" +files = [ + {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, + {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, +] + +[[package]] +name = "soupsieve" +version = "2.5" +description = "A modern CSS selector implementation for Beautiful Soup." +optional = false +python-versions = ">=3.8" +files = [ + {file = "soupsieve-2.5-py3-none-any.whl", hash = "sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7"}, + {file = "soupsieve-2.5.tar.gz", hash = "sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690"}, +] + +[[package]] +name = "sphinx" +version = "7.3.7" +description = "Python documentation generator" +optional = false +python-versions = ">=3.9" +files = [ + {file = "sphinx-7.3.7-py3-none-any.whl", hash = "sha256:413f75440be4cacf328f580b4274ada4565fb2187d696a84970c23f77b64d8c3"}, + {file = "sphinx-7.3.7.tar.gz", hash = "sha256:a4a7db75ed37531c05002d56ed6948d4c42f473a36f46e1382b0bd76ca9627bc"}, +] + +[package.dependencies] +alabaster = ">=0.7.14,<0.8.0" +babel = ">=2.9" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} +docutils = ">=0.18.1,<0.22" +imagesize = ">=1.3" +Jinja2 = ">=3.0" +packaging = ">=21.0" +Pygments = ">=2.14" +requests = ">=2.25.0" +snowballstemmer = ">=2.0" +sphinxcontrib-applehelp = "*" +sphinxcontrib-devhelp = "*" +sphinxcontrib-htmlhelp = ">=2.0.0" +sphinxcontrib-jsmath = "*" +sphinxcontrib-qthelp = "*" +sphinxcontrib-serializinghtml = ">=1.1.9" +tomli = {version = ">=2", markers = "python_version < \"3.11\""} + +[package.extras] +docs = ["sphinxcontrib-websupport"] +lint = ["flake8 (>=3.5.0)", "importlib_metadata", "mypy (==1.9.0)", "pytest (>=6.0)", "ruff (==0.3.7)", "sphinx-lint", "tomli", "types-docutils", "types-requests"] +test = ["cython (>=3.0)", "defusedxml (>=0.7.1)", "pytest (>=6.0)", "setuptools (>=67.0)"] + +[[package]] +name = "sphinx-autobuild" +version = "2024.9.19" +description = "Rebuild Sphinx documentation on changes, with hot reloading in the browser." +optional = false +python-versions = ">=3.9" +files = [ + {file = "sphinx_autobuild-2024.9.19-py3-none-any.whl", hash = "sha256:57d974eebfc6461ff0fd136e78bf7a9c057d543d5166d318a45599898019b82c"}, + {file = "sphinx_autobuild-2024.9.19.tar.gz", hash = "sha256:2dd4863d174e533c1cd075eb5dfc90ad9a21734af7efd25569bf228b405e08ef"}, +] + +[package.dependencies] +colorama = ">=0.4.6" +sphinx = "*" +starlette = ">=0.35" +uvicorn = ">=0.25" +watchfiles = ">=0.20" +websockets = ">=11" + +[package.extras] +test = ["httpx", "pytest (>=6)"] + +[[package]] +name = "sphinx-collapse" +version = "0.1.2" +description = "Collapse extension for Sphinx." +optional = false +python-versions = ">=3.7" +files = [ + {file = "sphinx_collapse-0.1.2-py3-none-any.whl", hash = "sha256:7a2082da3c779916cc4c4d44832db3522a3a8bfbd12598ef01fb9eb523a164d0"}, + {file = "sphinx_collapse-0.1.2.tar.gz", hash = "sha256:a186000bf3fdac8ac0e8a99979f720ae790de15a5efc1435d4816f79a3d377c2"}, +] + +[package.dependencies] +sphinx = ">=3" + +[package.extras] +doc = ["alabaster"] +test = ["pre-commit", "pytest"] + +[[package]] +name = "sphinx-copybutton" +version = "0.5.2" +description = "Add a copy button to each of your code cells." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "sphinx-copybutton-0.5.2.tar.gz", hash = "sha256:4cf17c82fb9646d1bc9ca92ac280813a3b605d8c421225fd9913154103ee1fbd"}, + {file = "sphinx_copybutton-0.5.2-py3-none-any.whl", hash = "sha256:fb543fd386d917746c9a2c50360c7905b605726b9355cd26e9974857afeae06e"}, +] + +[package.dependencies] +sphinx = ">=1.8" + +[package.extras] +code-style = ["pre-commit (==2.12.1)"] +rtd = ["ipython", "myst-nb", "sphinx", "sphinx-book-theme", "sphinx-examples"] + +[[package]] +name = "sphinx-multiversion-scylla" +version = "0.3.1" +description = "Add support for multiple versions to sphinx" +optional = false +python-versions = "*" +files = [ + {file = "sphinx-multiversion-scylla-0.3.1.tar.gz", hash = "sha256:6c04f35ce76b60c4b54d72c52d299624ddc93f2930606bf76db33c214ca38380"}, + {file = "sphinx_multiversion_scylla-0.3.1-py3-none-any.whl", hash = "sha256:762cfb79f4ea2540653a5e8d30f8b604362cebaafb87934895dcc5a8bea6e255"}, +] + +[package.dependencies] +sphinx = ">=2.1" + +[[package]] +name = "sphinx-notfound-page" +version = "1.0.4" +description = "Sphinx extension to build a 404 page with absolute URLs" +optional = false +python-versions = ">=3.8" +files = [ + {file = "sphinx_notfound_page-1.0.4-py3-none-any.whl", hash = "sha256:f7c26ae0df3cf3d6f38f56b068762e6203d0ebb7e1c804de1059598d7dd8b9d8"}, + {file = "sphinx_notfound_page-1.0.4.tar.gz", hash = "sha256:2a52f49cd367b5c4e64072de1591cc367714098500abf4ecb9a3ecb4fec25aae"}, +] + +[package.dependencies] +sphinx = ">=5" + +[package.extras] +doc = ["sphinx-autoapi", "sphinx-rtd-theme", "sphinx-tabs", "sphinxemoji"] +test = ["tox"] + +[[package]] +name = "sphinx-scylladb-theme" +version = "1.8.1" +description = "A Sphinx Theme for ScyllaDB documentation projects" +optional = false +python-versions = "<4.0,>=3.10" +files = [ + {file = "sphinx_scylladb_theme-1.8.1-py3-none-any.whl", hash = "sha256:cddc3fd7f0509af8a5668a029abff7c8fea7442fd788036bbd010fe7db22e9f2"}, + {file = "sphinx_scylladb_theme-1.8.1.tar.gz", hash = "sha256:16872cba848fac491e3a3cc62fddd82daacf05c4e63a0c9defb1ec23041bb885"}, +] + +[package.dependencies] +beautifulsoup4 = ">=4.12.3,<5.0.0" +pyyaml = ">=6.0.1,<7.0.0" +setuptools = ">=70.1.1,<75.0.0" +sphinx-collapse = ">=0.1.1,<0.2.0" +sphinx-copybutton = ">=0.5.2,<0.6.0" +sphinx-notfound-page = ">=1.0.4,<2.0.0" +Sphinx-Substitution-Extensions = ">=2022.2.16,<2023.0.0" +sphinx-tabs = ">=3.4.5,<4.0.0" + +[[package]] +name = "sphinx-sitemap" +version = "2.6.0" +description = "Sitemap generator for Sphinx" +optional = false +python-versions = "*" +files = [ + {file = "sphinx_sitemap-2.6.0-py3-none-any.whl", hash = "sha256:7478e417d141f99c9af27ccd635f44c03a471a08b20e778a0f9daef7ace1d30b"}, + {file = "sphinx_sitemap-2.6.0.tar.gz", hash = "sha256:5e0c66b9f2e371ede80c659866a9eaad337d46ab02802f9c7e5f7bc5893c28d2"}, +] + +[package.dependencies] +sphinx = ">=1.2" + +[package.extras] +dev = ["build", "flake8", "pre-commit", "pytest", "sphinx", "tox"] + +[[package]] +name = "sphinx-substitution-extensions" +version = "2022.2.16" +description = "Extensions for Sphinx which allow for substitutions." 
+optional = false +python-versions = "*" +files = [ + {file = "Sphinx Substitution Extensions-2022.2.16.tar.gz", hash = "sha256:ff7d05bd00e8b2d7eb8a403b9f317d70411d4e9b6812bf91534a50df22190c75"}, + {file = "Sphinx_Substitution_Extensions-2022.2.16-py3-none-any.whl", hash = "sha256:5a8ca34dac3984486344e95c36e3ed4766d402a71bdee7390d600f153db9795b"}, +] + +[package.dependencies] +docutils = ">=0.15" +sphinx = ">=4.0.0" + +[package.extras] +dev = ["autoflake (==1.4)", "black (==22.1.0)", "check-manifest (==0.47)", "doc8 (==0.10.1)", "flake8 (==4.0.1)", "flake8-commas (==2.1.0)", "flake8-quotes (==3.3.1)", "isort (==5.10.1)", "mypy (==0.931)", "pip-check-reqs (==2.3.2)", "pydocstyle (==6.1.1)", "pyenchant (==3.2.2)", "pylint (==2.12.2)", "pyroma (==3.2)", "pytest (==7.0.1)", "pytest-cov (==3.0.0)", "types-docutils (==0.17.5)", "vulture (==2.3)"] +prompt = ["sphinx-prompt (>=0.1)"] + +[[package]] +name = "sphinx-tabs" +version = "3.4.5" +description = "Tabbed views for Sphinx" +optional = false +python-versions = "~=3.7" +files = [ + {file = "sphinx-tabs-3.4.5.tar.gz", hash = "sha256:ba9d0c1e3e37aaadd4b5678449eb08176770e0fc227e769b6ce747df3ceea531"}, + {file = "sphinx_tabs-3.4.5-py3-none-any.whl", hash = "sha256:92cc9473e2ecf1828ca3f6617d0efc0aa8acb06b08c56ba29d1413f2f0f6cf09"}, +] + +[package.dependencies] +docutils = "*" +pygments = "*" +sphinx = "*" + +[package.extras] +code-style = ["pre-commit (==2.13.0)"] +testing = ["bs4", "coverage", "pygments", "pytest (>=7.1,<8)", "pytest-cov", "pytest-regressions", "rinohtype"] + +[[package]] +name = "sphinxcontrib-applehelp" +version = "1.0.7" +description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" +optional = false +python-versions = ">=3.9" +files = [ + {file = "sphinxcontrib_applehelp-1.0.7-py3-none-any.whl", hash = "sha256:094c4d56209d1734e7d252f6e0b3ccc090bd52ee56807a5d9315b19c122ab15d"}, + {file = "sphinxcontrib_applehelp-1.0.7.tar.gz", hash = "sha256:39fdc8d762d33b01a7d8f026a3b7d71563ea3b72787d5f00ad8465bd9d6dfbfa"}, +] + +[package.dependencies] +Sphinx = ">=5" + +[package.extras] +lint = ["docutils-stubs", "flake8", "mypy"] +test = ["pytest"] + +[[package]] +name = "sphinxcontrib-devhelp" +version = "1.0.5" +description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp documents" +optional = false +python-versions = ">=3.9" +files = [ + {file = "sphinxcontrib_devhelp-1.0.5-py3-none-any.whl", hash = "sha256:fe8009aed765188f08fcaadbb3ea0d90ce8ae2d76710b7e29ea7d047177dae2f"}, + {file = "sphinxcontrib_devhelp-1.0.5.tar.gz", hash = "sha256:63b41e0d38207ca40ebbeabcf4d8e51f76c03e78cd61abe118cf4435c73d4212"}, +] + +[package.dependencies] +Sphinx = ">=5" + +[package.extras] +lint = ["docutils-stubs", "flake8", "mypy"] +test = ["pytest"] + +[[package]] +name = "sphinxcontrib-htmlhelp" +version = "2.0.4" +description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" +optional = false +python-versions = ">=3.9" +files = [ + {file = "sphinxcontrib_htmlhelp-2.0.4-py3-none-any.whl", hash = "sha256:8001661c077a73c29beaf4a79968d0726103c5605e27db92b9ebed8bab1359e9"}, + {file = "sphinxcontrib_htmlhelp-2.0.4.tar.gz", hash = "sha256:6c26a118a05b76000738429b724a0568dbde5b72391a688577da08f11891092a"}, +] + +[package.dependencies] +Sphinx = ">=5" + +[package.extras] +lint = ["docutils-stubs", "flake8", "mypy"] +test = ["html5lib", "pytest"] + +[[package]] +name = "sphinxcontrib-jsmath" +version = "1.0.1" +description = "A sphinx extension which renders display math in HTML via 
JavaScript" +optional = false +python-versions = ">=3.5" +files = [ + {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"}, + {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"}, +] + +[package.extras] +test = ["flake8", "mypy", "pytest"] + +[[package]] +name = "sphinxcontrib-qthelp" +version = "1.0.6" +description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp documents" +optional = false +python-versions = ">=3.9" +files = [ + {file = "sphinxcontrib_qthelp-1.0.6-py3-none-any.whl", hash = "sha256:bf76886ee7470b934e363da7a954ea2825650013d367728588732c7350f49ea4"}, + {file = "sphinxcontrib_qthelp-1.0.6.tar.gz", hash = "sha256:62b9d1a186ab7f5ee3356d906f648cacb7a6bdb94d201ee7adf26db55092982d"}, +] + +[package.dependencies] +Sphinx = ">=5" + +[package.extras] +lint = ["docutils-stubs", "flake8", "mypy"] +test = ["pytest"] + +[[package]] +name = "sphinxcontrib-serializinghtml" +version = "1.1.9" +description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)" +optional = false +python-versions = ">=3.9" +files = [ + {file = "sphinxcontrib_serializinghtml-1.1.9-py3-none-any.whl", hash = "sha256:9b36e503703ff04f20e9675771df105e58aa029cfcbc23b8ed716019b7416ae1"}, + {file = "sphinxcontrib_serializinghtml-1.1.9.tar.gz", hash = "sha256:0c64ff898339e1fac29abd2bf5f11078f3ec413cfe9c046d3120d7ca65530b54"}, +] + +[package.dependencies] +Sphinx = ">=5" + +[package.extras] +lint = ["docutils-stubs", "flake8", "mypy"] +test = ["pytest"] + +[[package]] +name = "starlette" +version = "0.39.1" +description = "The little ASGI library that shines." +optional = false +python-versions = ">=3.8" +files = [ + {file = "starlette-0.39.1-py3-none-any.whl", hash = "sha256:0d31c90dacae588734e91b98cb4469fd37848ef23d2dd34355c5542bc827c02a"}, + {file = "starlette-0.39.1.tar.gz", hash = "sha256:33c5a94f64d3ab2c799b2715b45f254a3752f229d334f1562a3aaf78c23eab95"}, +] + +[package.dependencies] +anyio = ">=3.4.0,<5" + +[package.extras] +full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7)", "pyyaml"] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "tornado" +version = "5.1.1" +description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
+optional = false +python-versions = ">= 2.7, !=3.0.*, !=3.1.*, !=3.2.*, != 3.3.*" +files = [ + {file = "tornado-5.1.1-cp35-cp35m-win32.whl", hash = "sha256:732e836008c708de2e89a31cb2fa6c0e5a70cb60492bee6f1ea1047500feaf7f"}, + {file = "tornado-5.1.1-cp35-cp35m-win_amd64.whl", hash = "sha256:0662d28b1ca9f67108c7e3b77afabfb9c7e87bde174fbda78186ecedc2499a9d"}, + {file = "tornado-5.1.1-cp36-cp36m-win32.whl", hash = "sha256:8154ec22c450df4e06b35f131adc4f2f3a12ec85981a203301d310abf580500f"}, + {file = "tornado-5.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:d4b3e5329f572f055b587efc57d29bd051589fb5a43ec8898c77a47ec2fa2bbb"}, + {file = "tornado-5.1.1-cp37-cp37m-win32.whl", hash = "sha256:e5f2585afccbff22390cddac29849df463b252b711aa2ce7c5f3f342a5b3b444"}, + {file = "tornado-5.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:8e9d728c4579682e837c92fdd98036bd5cdefa1da2aaf6acf26947e6dd0c01c5"}, + {file = "tornado-5.1.1.tar.gz", hash = "sha256:4e5158d97583502a7e2739951553cbd88a72076f152b4b11b64b9a10c4c49409"}, +] + +[[package]] +name = "typer" +version = "0.9.0" +description = "Typer, build great CLIs. Easy to code. Based on Python type hints." +optional = false +python-versions = ">=3.6" +files = [ + {file = "typer-0.9.0-py3-none-any.whl", hash = "sha256:5d96d986a21493606a358cae4461bd8cdf83cbf33a5aa950ae629ca3b51467ee"}, + {file = "typer-0.9.0.tar.gz", hash = "sha256:50922fd79aea2f4751a8e0408ff10d2662bd0c8bbfa84755a699f3bada2978b2"}, +] + +[package.dependencies] +click = ">=7.1.1,<9.0.0" +typing-extensions = ">=3.7.4.3" + +[package.extras] +all = ["colorama (>=0.4.3,<0.5.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] +dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)", "pre-commit (>=2.17.0,<3.0.0)"] +doc = ["cairosvg (>=2.5.2,<3.0.0)", "mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pillow (>=9.3.0,<10.0.0)"] +test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.910)", "pytest (>=4.4.0,<8.0.0)", "pytest-cov (>=2.10.0,<5.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<4.0.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] + +[[package]] +name = "typing-extensions" +version = "4.8.0" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"}, + {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, +] + +[[package]] +name = "urllib3" +version = "2.0.7" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.7" +files = [ + {file = "urllib3-2.0.7-py3-none-any.whl", hash = "sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e"}, + {file = "urllib3-2.0.7.tar.gz", hash = "sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "uvicorn" +version = "0.31.0" +description = "The lightning-fast ASGI server." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "uvicorn-0.31.0-py3-none-any.whl", hash = "sha256:cac7be4dd4d891c363cd942160a7b02e69150dcbc7a36be04d5f4af4b17c8ced"}, + {file = "uvicorn-0.31.0.tar.gz", hash = "sha256:13bc21373d103859f68fe739608e2eb054a816dea79189bc3ca08ea89a275906"}, +] + +[package.dependencies] +click = ">=7.0" +h11 = ">=0.8" +typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} + +[package.extras] +standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] + +[[package]] +name = "watchfiles" +version = "0.24.0" +description = "Simple, modern and high performance file watching and code reload in python." +optional = false +python-versions = ">=3.8" +files = [ + {file = "watchfiles-0.24.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:083dc77dbdeef09fa44bb0f4d1df571d2e12d8a8f985dccde71ac3ac9ac067a0"}, + {file = "watchfiles-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e94e98c7cb94cfa6e071d401ea3342767f28eb5a06a58fafdc0d2a4974f4f35c"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82ae557a8c037c42a6ef26c494d0631cacca040934b101d001100ed93d43f361"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:acbfa31e315a8f14fe33e3542cbcafc55703b8f5dcbb7c1eecd30f141df50db3"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b74fdffce9dfcf2dc296dec8743e5b0332d15df19ae464f0e249aa871fc1c571"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:449f43f49c8ddca87c6b3980c9284cab6bd1f5c9d9a2b00012adaaccd5e7decd"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4abf4ad269856618f82dee296ac66b0cd1d71450fc3c98532d93798e73399b7a"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f895d785eb6164678ff4bb5cc60c5996b3ee6df3edb28dcdeba86a13ea0465e"}, + {file = "watchfiles-0.24.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7ae3e208b31be8ce7f4c2c0034f33406dd24fbce3467f77223d10cd86778471c"}, + {file = "watchfiles-0.24.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2efec17819b0046dde35d13fb8ac7a3ad877af41ae4640f4109d9154ed30a188"}, + {file = "watchfiles-0.24.0-cp310-none-win32.whl", hash = "sha256:6bdcfa3cd6fdbdd1a068a52820f46a815401cbc2cb187dd006cb076675e7b735"}, + {file = "watchfiles-0.24.0-cp310-none-win_amd64.whl", hash = "sha256:54ca90a9ae6597ae6dc00e7ed0a040ef723f84ec517d3e7ce13e63e4bc82fa04"}, + {file = "watchfiles-0.24.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:bdcd5538e27f188dd3c804b4a8d5f52a7fc7f87e7fd6b374b8e36a4ca03db428"}, + {file = "watchfiles-0.24.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2dadf8a8014fde6addfd3c379e6ed1a981c8f0a48292d662e27cabfe4239c83c"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6509ed3f467b79d95fc62a98229f79b1a60d1b93f101e1c61d10c95a46a84f43"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8360f7314a070c30e4c976b183d1d8d1585a4a50c5cb603f431cebcbb4f66327"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:316449aefacf40147a9efaf3bd7c9bdd35aaba9ac5d708bd1eb5763c9a02bef5"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73bde715f940bea845a95247ea3e5eb17769ba1010efdc938ffcb967c634fa61"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3770e260b18e7f4e576edca4c0a639f704088602e0bc921c5c2e721e3acb8d15"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa0fd7248cf533c259e59dc593a60973a73e881162b1a2f73360547132742823"}, + {file = "watchfiles-0.24.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d7a2e3b7f5703ffbd500dabdefcbc9eafeff4b9444bbdd5d83d79eedf8428fab"}, + {file = "watchfiles-0.24.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d831ee0a50946d24a53821819b2327d5751b0c938b12c0653ea5be7dea9c82ec"}, + {file = "watchfiles-0.24.0-cp311-none-win32.whl", hash = "sha256:49d617df841a63b4445790a254013aea2120357ccacbed00253f9c2b5dc24e2d"}, + {file = "watchfiles-0.24.0-cp311-none-win_amd64.whl", hash = "sha256:d3dcb774e3568477275cc76554b5a565024b8ba3a0322f77c246bc7111c5bb9c"}, + {file = "watchfiles-0.24.0-cp311-none-win_arm64.whl", hash = "sha256:9301c689051a4857d5b10777da23fafb8e8e921bcf3abe6448a058d27fb67633"}, + {file = "watchfiles-0.24.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:7211b463695d1e995ca3feb38b69227e46dbd03947172585ecb0588f19b0d87a"}, + {file = "watchfiles-0.24.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4b8693502d1967b00f2fb82fc1e744df128ba22f530e15b763c8d82baee15370"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdab9555053399318b953a1fe1f586e945bc8d635ce9d05e617fd9fe3a4687d6"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:34e19e56d68b0dad5cff62273107cf5d9fbaf9d75c46277aa5d803b3ef8a9e9b"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:41face41f036fee09eba33a5b53a73e9a43d5cb2c53dad8e61fa6c9f91b5a51e"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5148c2f1ea043db13ce9b0c28456e18ecc8f14f41325aa624314095b6aa2e9ea"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e4bd963a935aaf40b625c2499f3f4f6bbd0c3776f6d3bc7c853d04824ff1c9f"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c79d7719d027b7a42817c5d96461a99b6a49979c143839fc37aa5748c322f234"}, + {file = "watchfiles-0.24.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:32aa53a9a63b7f01ed32e316e354e81e9da0e6267435c7243bf8ae0f10b428ef"}, + {file = "watchfiles-0.24.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ce72dba6a20e39a0c628258b5c308779b8697f7676c254a845715e2a1039b968"}, + {file = "watchfiles-0.24.0-cp312-none-win32.whl", hash = "sha256:d9018153cf57fc302a2a34cb7564870b859ed9a732d16b41a9b5cb2ebed2d444"}, + {file = "watchfiles-0.24.0-cp312-none-win_amd64.whl", hash = "sha256:551ec3ee2a3ac9cbcf48a4ec76e42c2ef938a7e905a35b42a1267fa4b1645896"}, + {file = "watchfiles-0.24.0-cp312-none-win_arm64.whl", hash = "sha256:b52a65e4ea43c6d149c5f8ddb0bef8d4a1e779b77591a458a893eb416624a418"}, + {file = "watchfiles-0.24.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:3d2e3ab79a1771c530233cadfd277fcc762656d50836c77abb2e5e72b88e3a48"}, + {file = 
"watchfiles-0.24.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327763da824817b38ad125dcd97595f942d720d32d879f6c4ddf843e3da3fe90"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd82010f8ab451dabe36054a1622870166a67cf3fce894f68895db6f74bbdc94"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d64ba08db72e5dfd5c33be1e1e687d5e4fcce09219e8aee893a4862034081d4e"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1cf1f6dd7825053f3d98f6d33f6464ebdd9ee95acd74ba2c34e183086900a827"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:43e3e37c15a8b6fe00c1bce2473cfa8eb3484bbeecf3aefbf259227e487a03df"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88bcd4d0fe1d8ff43675360a72def210ebad3f3f72cabfeac08d825d2639b4ab"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:999928c6434372fde16c8f27143d3e97201160b48a614071261701615a2a156f"}, + {file = "watchfiles-0.24.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:30bbd525c3262fd9f4b1865cb8d88e21161366561cd7c9e1194819e0a33ea86b"}, + {file = "watchfiles-0.24.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:edf71b01dec9f766fb285b73930f95f730bb0943500ba0566ae234b5c1618c18"}, + {file = "watchfiles-0.24.0-cp313-none-win32.whl", hash = "sha256:f4c96283fca3ee09fb044f02156d9570d156698bc3734252175a38f0e8975f07"}, + {file = "watchfiles-0.24.0-cp313-none-win_amd64.whl", hash = "sha256:a974231b4fdd1bb7f62064a0565a6b107d27d21d9acb50c484d2cdba515b9366"}, + {file = "watchfiles-0.24.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:ee82c98bed9d97cd2f53bdb035e619309a098ea53ce525833e26b93f673bc318"}, + {file = "watchfiles-0.24.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fd92bbaa2ecdb7864b7600dcdb6f2f1db6e0346ed425fbd01085be04c63f0b05"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f83df90191d67af5a831da3a33dd7628b02a95450e168785586ed51e6d28943c"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fca9433a45f18b7c779d2bae7beeec4f740d28b788b117a48368d95a3233ed83"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b995bfa6bf01a9e09b884077a6d37070464b529d8682d7691c2d3b540d357a0c"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed9aba6e01ff6f2e8285e5aa4154e2970068fe0fc0998c4380d0e6278222269b"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5171ef898299c657685306d8e1478a45e9303ddcd8ac5fed5bd52ad4ae0b69b"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4933a508d2f78099162da473841c652ad0de892719043d3f07cc83b33dfd9d91"}, + {file = "watchfiles-0.24.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95cf3b95ea665ab03f5a54765fa41abf0529dbaf372c3b83d91ad2cfa695779b"}, + {file = "watchfiles-0.24.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:01def80eb62bd5db99a798d5e1f5f940ca0a05986dcfae21d833af7a46f7ee22"}, + {file = "watchfiles-0.24.0-cp38-none-win32.whl", hash = "sha256:4d28cea3c976499475f5b7a2fec6b3a36208656963c1a856d328aeae056fc5c1"}, + {file = 
"watchfiles-0.24.0-cp38-none-win_amd64.whl", hash = "sha256:21ab23fdc1208086d99ad3f69c231ba265628014d4aed31d4e8746bd59e88cd1"}, + {file = "watchfiles-0.24.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b665caeeda58625c3946ad7308fbd88a086ee51ccb706307e5b1fa91556ac886"}, + {file = "watchfiles-0.24.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5c51749f3e4e269231510da426ce4a44beb98db2dce9097225c338f815b05d4f"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82b2509f08761f29a0fdad35f7e1638b8ab1adfa2666d41b794090361fb8b855"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a60e2bf9dc6afe7f743e7c9b149d1fdd6dbf35153c78fe3a14ae1a9aee3d98b"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f7d9b87c4c55e3ea8881dfcbf6d61ea6775fffed1fedffaa60bd047d3c08c430"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78470906a6be5199524641f538bd2c56bb809cd4bf29a566a75051610bc982c3"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:07cdef0c84c03375f4e24642ef8d8178e533596b229d32d2bbd69e5128ede02a"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d337193bbf3e45171c8025e291530fb7548a93c45253897cd764a6a71c937ed9"}, + {file = "watchfiles-0.24.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ec39698c45b11d9694a1b635a70946a5bad066b593af863460a8e600f0dff1ca"}, + {file = "watchfiles-0.24.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2e28d91ef48eab0afb939fa446d8ebe77e2f7593f5f463fd2bb2b14132f95b6e"}, + {file = "watchfiles-0.24.0-cp39-none-win32.whl", hash = "sha256:7138eff8baa883aeaa074359daabb8b6c1e73ffe69d5accdc907d62e50b1c0da"}, + {file = "watchfiles-0.24.0-cp39-none-win_amd64.whl", hash = "sha256:b3ef2c69c655db63deb96b3c3e587084612f9b1fa983df5e0c3379d41307467f"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:632676574429bee8c26be8af52af20e0c718cc7f5f67f3fb658c71928ccd4f7f"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:a2a9891723a735d3e2540651184be6fd5b96880c08ffe1a98bae5017e65b544b"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7fa2bc0efef3e209a8199fd111b8969fe9db9c711acc46636686331eda7dd4"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01550ccf1d0aed6ea375ef259706af76ad009ef5b0203a3a4cce0f6024f9b68a"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:96619302d4374de5e2345b2b622dc481257a99431277662c30f606f3e22f42be"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:85d5f0c7771dcc7a26c7a27145059b6bb0ce06e4e751ed76cdf123d7039b60b5"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:951088d12d339690a92cef2ec5d3cfd957692834c72ffd570ea76a6790222777"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49fb58bcaa343fedc6a9e91f90195b20ccb3135447dc9e4e2570c3a39565853e"}, + {file = "watchfiles-0.24.0.tar.gz", hash = "sha256:afb72325b74fa7a428c009c1b8be4b4d7c2afedafb2982827ef2156646df2fe1"}, +] + +[package.dependencies] +anyio = ">=3.0.0" + +[[package]] +name = 
"websockets" +version = "13.1" +description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "websockets-13.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f48c749857f8fb598fb890a75f540e3221d0976ed0bf879cf3c7eef34151acee"}, + {file = "websockets-13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c7e72ce6bda6fb9409cc1e8164dd41d7c91466fb599eb047cfda72fe758a34a7"}, + {file = "websockets-13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f779498eeec470295a2b1a5d97aa1bc9814ecd25e1eb637bd9d1c73a327387f6"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676df3fe46956fbb0437d8800cd5f2b6d41143b6e7e842e60554398432cf29b"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7affedeb43a70351bb811dadf49493c9cfd1ed94c9c70095fd177e9cc1541fa"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1971e62d2caa443e57588e1d82d15f663b29ff9dfe7446d9964a4b6f12c1e700"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5f2e75431f8dc4a47f31565a6e1355fb4f2ecaa99d6b89737527ea917066e26c"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:58cf7e75dbf7e566088b07e36ea2e3e2bd5676e22216e4cad108d4df4a7402a0"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c90d6dec6be2c7d03378a574de87af9b1efea77d0c52a8301dd831ece938452f"}, + {file = "websockets-13.1-cp310-cp310-win32.whl", hash = "sha256:730f42125ccb14602f455155084f978bd9e8e57e89b569b4d7f0f0c17a448ffe"}, + {file = "websockets-13.1-cp310-cp310-win_amd64.whl", hash = "sha256:5993260f483d05a9737073be197371940c01b257cc45ae3f1d5d7adb371b266a"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:61fc0dfcda609cda0fc9fe7977694c0c59cf9d749fbb17f4e9483929e3c48a19"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ceec59f59d092c5007e815def4ebb80c2de330e9588e101cf8bd94c143ec78a5"}, + {file = "websockets-13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1dca61c6db1166c48b95198c0b7d9c990b30c756fc2923cc66f68d17dc558fd"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308e20f22c2c77f3f39caca508e765f8725020b84aa963474e18c59accbf4c02"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d516c325e6540e8a57b94abefc3459d7dab8ce52ac75c96cad5549e187e3a7"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87c6e35319b46b99e168eb98472d6c7d8634ee37750d7693656dc766395df096"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5f9fee94ebafbc3117c30be1844ed01a3b177bb6e39088bc6b2fa1dc15572084"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7c1e90228c2f5cdde263253fa5db63e6653f1c00e7ec64108065a0b9713fa1b3"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6548f29b0e401eea2b967b2fdc1c7c7b5ebb3eeb470ed23a54cd45ef078a0db9"}, + {file = "websockets-13.1-cp311-cp311-win32.whl", hash = "sha256:c11d4d16e133f6df8916cc5b7e3e96ee4c44c936717d684a94f48f82edb7c92f"}, + {file = 
"websockets-13.1-cp311-cp311-win_amd64.whl", hash = "sha256:d04f13a1d75cb2b8382bdc16ae6fa58c97337253826dfe136195b7f89f661557"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9d75baf00138f80b48f1eac72ad1535aac0b6461265a0bcad391fc5aba875cfc"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9b6f347deb3dcfbfde1c20baa21c2ac0751afaa73e64e5b693bb2b848efeaa49"}, + {file = "websockets-13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de58647e3f9c42f13f90ac7e5f58900c80a39019848c5547bc691693098ae1bd"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1b54689e38d1279a51d11e3467dd2f3a50f5f2e879012ce8f2d6943f00e83f0"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf1781ef73c073e6b0f90af841aaf98501f975d306bbf6221683dd594ccc52b6"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d23b88b9388ed85c6faf0e74d8dec4f4d3baf3ecf20a65a47b836d56260d4b9"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3c78383585f47ccb0fcf186dcb8a43f5438bd7d8f47d69e0b56f71bf431a0a68"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d6d300f8ec35c24025ceb9b9019ae9040c1ab2f01cddc2bcc0b518af31c75c14"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a9dcaf8b0cc72a392760bb8755922c03e17a5a54e08cca58e8b74f6902b433cf"}, + {file = "websockets-13.1-cp312-cp312-win32.whl", hash = "sha256:2f85cf4f2a1ba8f602298a853cec8526c2ca42a9a4b947ec236eaedb8f2dc80c"}, + {file = "websockets-13.1-cp312-cp312-win_amd64.whl", hash = "sha256:38377f8b0cdeee97c552d20cf1865695fcd56aba155ad1b4ca8779a5b6ef4ac3"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a9ab1e71d3d2e54a0aa646ab6d4eebfaa5f416fe78dfe4da2839525dc5d765c6"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b9d7439d7fab4dce00570bb906875734df13d9faa4b48e261c440a5fec6d9708"}, + {file = "websockets-13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327b74e915cf13c5931334c61e1a41040e365d380f812513a255aa804b183418"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:325b1ccdbf5e5725fdcb1b0e9ad4d2545056479d0eee392c291c1bf76206435a"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:346bee67a65f189e0e33f520f253d5147ab76ae42493804319b5716e46dddf0f"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91a0fa841646320ec0d3accdff5b757b06e2e5c86ba32af2e0815c96c7a603c5"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:18503d2c5f3943e93819238bf20df71982d193f73dcecd26c94514f417f6b135"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a9cd1af7e18e5221d2878378fbc287a14cd527fdd5939ed56a18df8a31136bb2"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:70c5be9f416aa72aab7a2a76c90ae0a4fe2755c1816c153c1a2bcc3333ce4ce6"}, + {file = "websockets-13.1-cp313-cp313-win32.whl", hash = "sha256:624459daabeb310d3815b276c1adef475b3e6804abaf2d9d2c061c319f7f187d"}, + {file = "websockets-13.1-cp313-cp313-win_amd64.whl", hash = 
"sha256:c518e84bb59c2baae725accd355c8dc517b4a3ed8db88b4bc93c78dae2974bf2"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c7934fd0e920e70468e676fe7f1b7261c1efa0d6c037c6722278ca0228ad9d0d"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:149e622dc48c10ccc3d2760e5f36753db9cacf3ad7bc7bbbfd7d9c819e286f23"}, + {file = "websockets-13.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a569eb1b05d72f9bce2ebd28a1ce2054311b66677fcd46cf36204ad23acead8c"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95df24ca1e1bd93bbca51d94dd049a984609687cb2fb08a7f2c56ac84e9816ea"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8dbb1bf0c0a4ae8b40bdc9be7f644e2f3fb4e8a9aca7145bfa510d4a374eeb7"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:035233b7531fb92a76beefcbf479504db8c72eb3bff41da55aecce3a0f729e54"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e4450fc83a3df53dec45922b576e91e94f5578d06436871dce3a6be38e40f5db"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:463e1c6ec853202dd3657f156123d6b4dad0c546ea2e2e38be2b3f7c5b8e7295"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6d6855bbe70119872c05107e38fbc7f96b1d8cb047d95c2c50869a46c65a8e96"}, + {file = "websockets-13.1-cp38-cp38-win32.whl", hash = "sha256:204e5107f43095012b00f1451374693267adbb832d29966a01ecc4ce1db26faf"}, + {file = "websockets-13.1-cp38-cp38-win_amd64.whl", hash = "sha256:485307243237328c022bc908b90e4457d0daa8b5cf4b3723fd3c4a8012fce4c6"}, + {file = "websockets-13.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9b37c184f8b976f0c0a231a5f3d6efe10807d41ccbe4488df8c74174805eea7d"}, + {file = "websockets-13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:163e7277e1a0bd9fb3c8842a71661ad19c6aa7bb3d6678dc7f89b17fbcc4aeb7"}, + {file = "websockets-13.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4b889dbd1342820cc210ba44307cf75ae5f2f96226c0038094455a96e64fb07a"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:586a356928692c1fed0eca68b4d1c2cbbd1ca2acf2ac7e7ebd3b9052582deefa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7bd6abf1e070a6b72bfeb71049d6ad286852e285f146682bf30d0296f5fbadfa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2aad13a200e5934f5a6767492fb07151e1de1d6079c003ab31e1823733ae79"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:df01aea34b6e9e33572c35cd16bae5a47785e7d5c8cb2b54b2acdb9678315a17"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e54affdeb21026329fb0744ad187cf812f7d3c2aa702a5edb562b325191fcab6"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9ef8aa8bdbac47f4968a5d66462a2a0935d044bf35c0e5a8af152d58516dbeb5"}, + {file = "websockets-13.1-cp39-cp39-win32.whl", hash = "sha256:deeb929efe52bed518f6eb2ddc00cc496366a14c726005726ad62c2dd9017a3c"}, + {file = "websockets-13.1-cp39-cp39-win_amd64.whl", hash = "sha256:7c65ffa900e7cc958cd088b9a9157a8141c991f8c53d11087e6fb7277a03f81d"}, + {file = 
"websockets-13.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5dd6da9bec02735931fccec99d97c29f47cc61f644264eb995ad6c0c27667238"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2510c09d8e8df777177ee3d40cd35450dc169a81e747455cc4197e63f7e7bfe5"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1c3cf67185543730888b20682fb186fc8d0fa6f07ccc3ef4390831ab4b388d9"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcc03c8b72267e97b49149e4863d57c2d77f13fae12066622dc78fe322490fe6"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:004280a140f220c812e65f36944a9ca92d766b6cc4560be652a0a3883a79ed8a"}, + {file = "websockets-13.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e2620453c075abeb0daa949a292e19f56de518988e079c36478bacf9546ced23"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9156c45750b37337f7b0b00e6248991a047be4aa44554c9886fe6bdd605aab3b"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80c421e07973a89fbdd93e6f2003c17d20b69010458d3a8e37fb47874bd67d51"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82d0ba76371769d6a4e56f7e83bb8e81846d17a6190971e38b5de108bde9b0d7"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9875a0143f07d74dc5e1ded1c4581f0d9f7ab86c78994e2ed9e95050073c94d"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a11e38ad8922c7961447f35c7b17bffa15de4d17c70abd07bfbe12d6faa3e027"}, + {file = "websockets-13.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4059f790b6ae8768471cddb65d3c4fe4792b0ab48e154c9f0a04cefaabcd5978"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:25c35bf84bf7c7369d247f0b8cfa157f989862c49104c5cf85cb5436a641d93e"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:83f91d8a9bb404b8c2c41a707ac7f7f75b9442a0a876df295de27251a856ad09"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a43cfdcddd07f4ca2b1afb459824dd3c6d53a51410636a2c7fc97b9a8cf4842"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48a2ef1381632a2f0cb4efeff34efa97901c9fbc118e01951ad7cfc10601a9bb"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459bf774c754c35dbb487360b12c5727adab887f1622b8aed5755880a21c4a20"}, + {file = "websockets-13.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:95858ca14a9f6fa8413d29e0a585b31b278388aa775b8a81fa24830123874678"}, + {file = "websockets-13.1-py3-none-any.whl", hash = "sha256:a9a396a6ad26130cdae92ae10c36af09d9bfe6cafe69670fd3b6da9b07b4044f"}, + {file = "websockets-13.1.tar.gz", hash = "sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"}, +] + +[[package]] +name = "zope-event" +version = "5.0" +description = "Very basic event publishing system" +optional = false +python-versions = ">=3.7" +files = [ 
+ {file = "zope.event-5.0-py3-none-any.whl", hash = "sha256:2832e95014f4db26c47a13fdaef84cef2f4df37e66b59d8f1f4a8f319a632c26"}, + {file = "zope.event-5.0.tar.gz", hash = "sha256:bac440d8d9891b4068e2b5a2c5e2c9765a9df762944bda6955f96bb9b91e67cd"}, +] + +[package.dependencies] +setuptools = "*" + +[package.extras] +docs = ["Sphinx"] +test = ["zope.testrunner"] + +[[package]] +name = "zope-interface" +version = "6.1" +description = "Interfaces for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "zope.interface-6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:43b576c34ef0c1f5a4981163b551a8781896f2a37f71b8655fd20b5af0386abb"}, + {file = "zope.interface-6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:67be3ca75012c6e9b109860820a8b6c9a84bfb036fbd1076246b98e56951ca92"}, + {file = "zope.interface-6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b9bc671626281f6045ad61d93a60f52fd5e8209b1610972cf0ef1bbe6d808e3"}, + {file = "zope.interface-6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bbe81def9cf3e46f16ce01d9bfd8bea595e06505e51b7baf45115c77352675fd"}, + {file = "zope.interface-6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dc998f6de015723196a904045e5a2217f3590b62ea31990672e31fbc5370b41"}, + {file = "zope.interface-6.1-cp310-cp310-win_amd64.whl", hash = "sha256:239a4a08525c080ff833560171d23b249f7f4d17fcbf9316ef4159f44997616f"}, + {file = "zope.interface-6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9ffdaa5290422ac0f1688cb8adb1b94ca56cee3ad11f29f2ae301df8aecba7d1"}, + {file = "zope.interface-6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:34c15ca9248f2e095ef2e93af2d633358c5f048c49fbfddf5fdfc47d5e263736"}, + {file = "zope.interface-6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b012d023b4fb59183909b45d7f97fb493ef7a46d2838a5e716e3155081894605"}, + {file = "zope.interface-6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:97806e9ca3651588c1baaebb8d0c5ee3db95430b612db354c199b57378312ee8"}, + {file = "zope.interface-6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fddbab55a2473f1d3b8833ec6b7ac31e8211b0aa608df5ab09ce07f3727326de"}, + {file = "zope.interface-6.1-cp311-cp311-win_amd64.whl", hash = "sha256:a0da79117952a9a41253696ed3e8b560a425197d4e41634a23b1507efe3273f1"}, + {file = "zope.interface-6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8bb9c990ca9027b4214fa543fd4025818dc95f8b7abce79d61dc8a2112b561a"}, + {file = "zope.interface-6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b51b64432eed4c0744241e9ce5c70dcfecac866dff720e746d0a9c82f371dfa7"}, + {file = "zope.interface-6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa6fd016e9644406d0a61313e50348c706e911dca29736a3266fc9e28ec4ca6d"}, + {file = "zope.interface-6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c8cf55261e15590065039696607f6c9c1aeda700ceee40c70478552d323b3ff"}, + {file = "zope.interface-6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e30506bcb03de8983f78884807e4fd95d8db6e65b69257eea05d13d519b83ac0"}, + {file = "zope.interface-6.1-cp312-cp312-win_amd64.whl", hash = 
"sha256:e33e86fd65f369f10608b08729c8f1c92ec7e0e485964670b4d2633a4812d36b"}, + {file = "zope.interface-6.1-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:2f8d89721834524a813f37fa174bac074ec3d179858e4ad1b7efd4401f8ac45d"}, + {file = "zope.interface-6.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13b7d0f2a67eb83c385880489dbb80145e9d344427b4262c49fbf2581677c11c"}, + {file = "zope.interface-6.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef43ee91c193f827e49599e824385ec7c7f3cd152d74cb1dfe02cb135f264d83"}, + {file = "zope.interface-6.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e441e8b7d587af0414d25e8d05e27040d78581388eed4c54c30c0c91aad3a379"}, + {file = "zope.interface-6.1-cp37-cp37m-win_amd64.whl", hash = "sha256:f89b28772fc2562ed9ad871c865f5320ef761a7fcc188a935e21fe8b31a38ca9"}, + {file = "zope.interface-6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:70d2cef1bf529bff41559be2de9d44d47b002f65e17f43c73ddefc92f32bf00f"}, + {file = "zope.interface-6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ad54ed57bdfa3254d23ae04a4b1ce405954969c1b0550cc2d1d2990e8b439de1"}, + {file = "zope.interface-6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef467d86d3cfde8b39ea1b35090208b0447caaabd38405420830f7fd85fbdd56"}, + {file = "zope.interface-6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6af47f10cfc54c2ba2d825220f180cc1e2d4914d783d6fc0cd93d43d7bc1c78b"}, + {file = "zope.interface-6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9559138690e1bd4ea6cd0954d22d1e9251e8025ce9ede5d0af0ceae4a401e43"}, + {file = "zope.interface-6.1-cp38-cp38-win_amd64.whl", hash = "sha256:964a7af27379ff4357dad1256d9f215047e70e93009e532d36dcb8909036033d"}, + {file = "zope.interface-6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:387545206c56b0315fbadb0431d5129c797f92dc59e276b3ce82db07ac1c6179"}, + {file = "zope.interface-6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:57d0a8ce40ce440f96a2c77824ee94bf0d0925e6089df7366c2272ccefcb7941"}, + {file = "zope.interface-6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ebc4d34e7620c4f0da7bf162c81978fce0ea820e4fa1e8fc40ee763839805f3"}, + {file = "zope.interface-6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a804abc126b33824a44a7aa94f06cd211a18bbf31898ba04bd0924fbe9d282d"}, + {file = "zope.interface-6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f294a15f7723fc0d3b40701ca9b446133ec713eafc1cc6afa7b3d98666ee1ac"}, + {file = "zope.interface-6.1-cp39-cp39-win_amd64.whl", hash = "sha256:a41f87bb93b8048fe866fa9e3d0c51e27fe55149035dcf5f43da4b56732c0a40"}, + {file = "zope.interface-6.1.tar.gz", hash = "sha256:2fdc7ccbd6eb6b7df5353012fbed6c3c5d04ceaca0038f75e601060e95345309"}, +] + +[package.dependencies] +setuptools = "*" + +[package.extras] +docs = ["Sphinx", "repoze.sphinx.autointerface", "sphinx-rtd-theme"] +test = ["coverage (>=5.0.3)", "zope.event", "zope.testing"] +testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.10" +content-hash = "8f7b4cb1dfb489f9f4abdb06ca417d2d2947629c338eeed5d4cab8ce73aec0c0" diff --git 
a/docs/pyproject.toml b/docs/pyproject.toml
index 6513716249..47a336674d 100644
--- a/docs/pyproject.toml
+++ b/docs/pyproject.toml
@@ -3,6 +3,7 @@ name = "python-driver-docs"
 version = "0.1.0"
 description = "ScyllaDB Python Driver Docs"
 authors = ["Python Driver Contributors"]
+package-mode = false
 
 [tool.poetry.dependencies]
 eventlet = "^0.33.3"
@@ -10,19 +11,18 @@ futures = "2.2.0"
 geomet = ">=0.1,<0.3"
 gevent = "^23.9.1"
 gremlinpython = "3.4.7"
-python = "^3.9"
-pyyaml = "6.0.1"
-pygments = "2.15.1"
+python = "^3.10"
+pygments = "^2.18.0"
 recommonmark = "0.7.1"
 redirects_cli ="~0.1.2"
-sphinx-autobuild = "2021.3.14"
-sphinx-sitemap = "2.5.1"
-sphinx-scylladb-theme = "~1.7.2"
-sphinx-multiversion-scylla = "~0.3.1"
-Sphinx = "7.2.6"
+sphinx-autobuild = "^2024.4.19"
+sphinx-sitemap = "^2.6.0"
+sphinx-scylladb-theme = "^1.8.1"
+sphinx-multiversion-scylla = "^0.3.1"
+Sphinx = "^7.3.7"
 scales = "^1.0.9"
 six = ">=1.9"
 
 [build-system]
-requires = ["poetry>=0.12"]
+requires = ["poetry>=1.8.0"]
 build-backend = "poetry.masonry.api"

From 3c04eff1c5fedc73f03da942e2eba42fd9577acc Mon Sep 17 00:00:00 2001
From: David Garcia
Date: Sun, 29 Sep 2024 18:23:28 +0100
Subject: [PATCH 267/551] docs: update command

---
 docs/Makefile | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/docs/Makefile b/docs/Makefile
index d1c3a4c8ec..51fc55beef 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -25,6 +25,9 @@ setupenv:
 .PHONY: setup
 setup:
 	$(POETRY) install
+
+.PHONY: update
+update:
+	$(POETRY) update
 
 # Clean commands

From e4a000fdf6548b1cfde477567304b667fcb4cb96 Mon Sep 17 00:00:00 2001
From: Dmitry Kropachev
Date: Fri, 27 Sep 2024 16:03:53 -0400
Subject: [PATCH 268/551] Introduce metadata_request_timeout configuration option

This option allows users to control the timeout for the driver's internal
queries. The idea is to make driver queries more resilient and independent
of user queries.
---
 cassandra/cluster.py                        | 28 ++++++++++++---
 cassandra/metadata.py                       | 39 +++++++++++----------
 tests/integration/standard/test_metadata.py |  6 ++--
 tests/unit/test_metadata.py                 |  2 +-
 4 files changed, 49 insertions(+), 26 deletions(-)

diff --git a/cassandra/cluster.py b/cassandra/cluster.py
index 06e6293ef8..6d79636c42 100644
--- a/cassandra/cluster.py
+++ b/cassandra/cluster.py
@@ -19,6 +19,7 @@ from __future__ import absolute_import
 
 import atexit
+import datetime
 from binascii import hexlify
 from collections import defaultdict
 from collections.abc import Mapping
@@ -1033,6 +1034,12 @@ def default_retry_policy(self, policy):
     or to disable the shardaware port (advanced shardaware)
     """
 
+    metadata_request_timeout = datetime.timedelta(seconds=2)
+    """
+    Timeout for all queries used by the driver itself.
+    Supported only by Scylla clusters.
+    """
+
     @property
     def schema_metadata_enabled(self):
         """
@@ -1148,7 +1155,9 @@ def __init__(self,
                  client_id=None,
                  cloud=None,
                  scylla_cloud=None,
-                 shard_aware_options=None):
+                 shard_aware_options=None,
+                 metadata_request_timeout=None,
+                 ):
         """
         ``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as
         establishing connection pools or refreshing metadata.
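For illustration, a minimal, hypothetical sketch of the new keyword from the caller's side (this snippet is an editorial aside, not part of the diff; the contact point and the five-second value are placeholders):

    import datetime

    from cassandra.cluster import Cluster

    # metadata_request_timeout accepts a datetime.timedelta and bounds the
    # driver's internal (metadata) queries; user queries are unaffected.
    # Per the docstring above, it is honored only on Scylla clusters,
    # since Cassandra does not support USING TIMEOUT.
    cluster = Cluster(
        contact_points=["127.0.0.1"],  # placeholder contact point
        metadata_request_timeout=datetime.timedelta(seconds=5),
    )
    session = cluster.connect()

Internal queries that carry such a timeout are suffixed with a "USING TIMEOUT <n>ms" clause; the helper that performs the rewrite, maybe_add_timeout_to_query(), is introduced in the follow-up patch below.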
@@ -1240,6 +1249,8 @@ def __init__(self,
         self.no_compact = no_compact
         self.auth_provider = auth_provider
 
+        if metadata_request_timeout is not None:
+            self.metadata_request_timeout = metadata_request_timeout
 
         if load_balancing_policy is not None:
             if isinstance(load_balancing_policy, type):
@@ -3549,6 +3560,7 @@ class PeersQueryType(object):
     _is_shutdown = False
     _timeout = None
     _protocol_version = None
+    _metadata_request_timeout = None
 
     _schema_event_refresh_window = None
     _topology_event_refresh_window = None
@@ -3648,7 +3660,7 @@ def _reconnect_internal(self):
             (conn, _) = self._connect_host_in_lbp()
             if conn is not None:
                 return conn
-
+
         # Try to re-resolve hostnames as a fallback when all hosts are unreachable
         self._cluster._resolve_hostnames()
 
@@ -3693,7 +3705,10 @@ def _try_connect(self, host):
         # If sharding information is available, it's a ScyllaDB cluster, so do not use peers_v2 table.
         if connection.features.sharding_info is not None:
             self._uses_peers_v2 = False
-
+
+        # Cassandra does not support "USING TIMEOUT"
+        self._metadata_request_timeout = None if connection.features.sharding_info is None \
+            else datetime.timedelta(seconds=self._cluster.control_connection_timeout)
         self._tablets_routing_v1 = connection.features.tablets_routing_v1
 
         # use weak references in both directions
@@ -3830,7 +3845,12 @@ def _refresh_schema(self, connection, preloaded_results=None, schema_agreement_w
             log.debug("Skipping schema refresh due to lack of schema agreement")
             return False
 
-        self._cluster.metadata.refresh(connection, self._timeout, fetch_size=self._schema_meta_page_size, **kwargs)
+        self._cluster.metadata.refresh(
+            connection,
+            self._timeout,
+            fetch_size=self._schema_meta_page_size,
+            metadata_request_timeout=self._metadata_request_timeout,
+            **kwargs)
 
         return True
 
diff --git a/cassandra/metadata.py b/cassandra/metadata.py
index edee822e40..34a4df127f 100644
--- a/cassandra/metadata.py
+++ b/cassandra/metadata.py
@@ -134,11 +134,12 @@ def export_schema_as_string(self):
         """
         return "\n\n".join(ks.export_as_string() for ks in self.keyspaces.values())
 
-    def refresh(self, connection, timeout, target_type=None, change_type=None, fetch_size=None, **kwargs):
+    def refresh(self, connection, timeout, target_type=None, change_type=None, fetch_size=None,
+                metadata_request_timeout=None, **kwargs):
 
         server_version = self.get_host(connection.original_endpoint).release_version
         dse_version = self.get_host(connection.original_endpoint).dse_version
-        parser = get_schema_parser(connection, server_version, dse_version, timeout, fetch_size)
+        parser = get_schema_parser(connection, server_version, dse_version, timeout, metadata_request_timeout, fetch_size)
 
         if not target_type:
             self._rebuild_all(parser)
@@ -1946,11 +1947,11 @@ def export_as_string(self):
 
 
 class _SchemaParser(object):
-
-    def __init__(self, connection, timeout, fetch_size):
+    def __init__(self, connection, timeout, fetch_size, metadata_request_timeout):
         self.connection = connection
         self.timeout = timeout
         self.fetch_size = fetch_size
+        self.metadata_request_timeout = metadata_request_timeout
 
     def _handle_results(self, success, result, expected_failures=tuple(), query_msg=None, timeout=None):
         """
@@ -2054,8 +2055,8 @@ class SchemaParserV22(_SchemaParser):
         "compression",
         "default_time_to_live")
 
-    def __init__(self, connection, timeout, fetch_size):
-        super(SchemaParserV22, self).__init__(connection, timeout, fetch_size)
+    def __init__(self, connection, timeout, fetch_size, metadata_request_timeout):
+        super(SchemaParserV22, self).__init__(connection, timeout, fetch_size, metadata_request_timeout)
         self.keyspaces_result = []
         self.tables_result = []
         self.columns_result = []
@@ -2575,8 +2576,8 @@ class SchemaParserV3(SchemaParserV22):
         'read_repair_chance',
         'speculative_retry')
 
-    def __init__(self, connection, timeout, fetch_size):
-        super(SchemaParserV3, self).__init__(connection, timeout, fetch_size)
+    def __init__(self, connection, timeout, fetch_size, metadata_request_timeout):
+        super(SchemaParserV3, self).__init__(connection, timeout, fetch_size, metadata_request_timeout)
         self.indexes_result = []
         self.keyspace_table_index_rows = defaultdict(lambda: defaultdict(list))
         self.keyspace_view_rows = defaultdict(list)
@@ -2860,8 +2861,8 @@ class SchemaParserV4(SchemaParserV3):
     _SELECT_VIRTUAL_TABLES = 'SELECT * from system_virtual_schema.tables'
     _SELECT_VIRTUAL_COLUMNS = 'SELECT * from system_virtual_schema.columns'
 
-    def __init__(self, connection, timeout, fetch_size):
-        super(SchemaParserV4, self).__init__(connection, timeout, fetch_size)
+    def __init__(self, connection, timeout, fetch_size, metadata_request_timeout):
+        super(SchemaParserV4, self).__init__(connection, timeout, fetch_size, metadata_request_timeout)
         self.virtual_keyspaces_rows = defaultdict(list)
         self.virtual_tables_rows = defaultdict(list)
         self.virtual_columns_rows = defaultdict(lambda: defaultdict(list))
@@ -2995,8 +2996,8 @@ class SchemaParserDSE68(SchemaParserDSE67):
 
     _table_metadata_class = TableMetadataDSE68
 
-    def __init__(self, connection, timeout, fetch_size):
-        super(SchemaParserDSE68, self).__init__(connection, timeout, fetch_size)
+    def __init__(self, connection, timeout, fetch_size, metadata_request_timeout):
+        super(SchemaParserDSE68, self).__init__(connection, timeout, fetch_size, metadata_request_timeout)
         self.keyspace_table_vertex_rows = defaultdict(lambda: defaultdict(list))
         self.keyspace_table_edge_rows = defaultdict(lambda: defaultdict(list))
 
@@ -3361,25 +3362,25 @@ def __init__(
         self.to_clustering_columns = to_clustering_columns
 
 
-def get_schema_parser(connection, server_version, dse_version, timeout, fetch_size=None):
+def get_schema_parser(connection, server_version, dse_version, timeout, metadata_request_timeout, fetch_size=None):
     version = Version(server_version)
     if dse_version:
         v = Version(dse_version)
         if v >= Version('6.8.0'):
-            return SchemaParserDSE68(connection, timeout, fetch_size)
+            return SchemaParserDSE68(connection, timeout, fetch_size, metadata_request_timeout)
         elif v >= Version('6.7.0'):
-            return SchemaParserDSE67(connection, timeout, fetch_size)
+            return SchemaParserDSE67(connection, timeout, fetch_size, metadata_request_timeout)
         elif v >= Version('6.0.0'):
-            return SchemaParserDSE60(connection, timeout, fetch_size)
+            return SchemaParserDSE60(connection, timeout, fetch_size, metadata_request_timeout)
 
     if version >= Version('4-a'):
-        return SchemaParserV4(connection, timeout, fetch_size)
+        return SchemaParserV4(connection, timeout, fetch_size, metadata_request_timeout)
     elif version >= Version('3.0.0'):
-        return SchemaParserV3(connection, timeout, fetch_size)
+        return SchemaParserV3(connection, timeout, fetch_size, metadata_request_timeout)
     else:
         # we could further specialize by version. Right now just refactoring the
         # multi-version parser we have as of C* 2.2.0rc1.
-        return SchemaParserV22(connection, timeout, fetch_size)
+        return SchemaParserV22(connection, timeout, fetch_size, metadata_request_timeout)
 
 
 def _cql_from_cass_type(cass_type):
diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py
index f706e7c0bd..8fc50ce89e 100644
--- a/tests/integration/standard/test_metadata.py
+++ b/tests/integration/standard/test_metadata.py
@@ -243,7 +243,8 @@ def test_basic_table_meta_properties(self):
             cc,
             self.cluster.metadata.get_host(cc.host).release_version,
             self.cluster.metadata.get_host(cc.host).dse_version,
-            1
+            1,
+            None,
         )
 
         for option in tablemeta.options:
@@ -1968,7 +1969,8 @@ def setup_class(cls):
             connection,
             cls.cluster.metadata.get_host(connection.host).release_version,
             cls.cluster.metadata.get_host(connection.host).dse_version,
-            timeout=20
+            20,
+            None,
         ).__class__
 
         cls.cluster.control_connection.reconnect = Mock()
diff --git a/tests/unit/test_metadata.py b/tests/unit/test_metadata.py
index 94fed13455..dcb9928430 100644
--- a/tests/unit/test_metadata.py
+++ b/tests/unit/test_metadata.py
@@ -618,7 +618,7 @@ def test_build_index_as_cql(self):
         column_meta.table.name = 'table_name_here'
         column_meta.table.keyspace_name = 'keyspace_name_here'
         column_meta.table.columns = {column_meta.name: column_meta}
-        parser = get_schema_parser(Mock(), '2.1.0', None, 0.1)
+        parser = get_schema_parser(Mock(), '2.1.0', None, 0.1, None)
 
         row = {'index_name': 'index_name_here', 'index_type': 'index_type_here'}
         index_meta = parser._build_index_metadata(column_meta, row)

From 4beebd5c8c8599a4faecfdc5a5ace6c835923da8 Mon Sep 17 00:00:00 2001
From: Dmitry Kropachev
Date: Fri, 27 Sep 2024 16:10:01 -0400
Subject: [PATCH 269/551] Use metadata_request_timeout for all driver queries

---
 cassandra/cluster.py  |  28 ++++---
 cassandra/metadata.py | 184 +++++++++++++++++++++++++++++-------------
 cassandra/query.py    |  11 ++-
 cassandra/util.py     |  10 +++
 4 files changed, 164 insertions(+), 69 deletions(-)

diff --git a/cassandra/cluster.py b/cassandra/cluster.py
index 6d79636c42..cd5bac51a5 100644
--- a/cassandra/cluster.py
+++ b/cassandra/cluster.py
@@ -83,7 +83,7 @@ from cassandra.marshal import int64_pack
 from cassandra.tablets import Tablet, Tablets
 from cassandra.timestamps import MonotonicTimestampGenerator
-from cassandra.util import _resolve_contact_points_to_string_map, Version
+from cassandra.util import _resolve_contact_points_to_string_map, Version, maybe_add_timeout_to_query
 
 from cassandra.datastax.insights.reporter import MonitorReporter
 from cassandra.datastax.insights.util import version_supports_insights
@@ -3725,8 +3725,10 @@ def _try_connect(self, host):
         sel_peers = self._get_peers_query(self.PeersQueryType.PEERS, connection)
         sel_local = self._SELECT_LOCAL if self._token_meta_enabled else self._SELECT_LOCAL_NO_TOKENS
-        peers_query = QueryMessage(query=sel_peers, consistency_level=ConsistencyLevel.ONE)
-        local_query = QueryMessage(query=sel_local, consistency_level=ConsistencyLevel.ONE)
+        peers_query = QueryMessage(query=maybe_add_timeout_to_query(sel_peers, self._metadata_request_timeout),
+                                   consistency_level=ConsistencyLevel.ONE)
+        local_query = QueryMessage(query=maybe_add_timeout_to_query(sel_local, self._metadata_request_timeout),
+                                   consistency_level=ConsistencyLevel.ONE)
 
         (peers_success, peers_result), (local_success, local_result) = connection.wait_for_responses(
             peers_query, local_query, timeout=self._timeout, fail_on_error=False)
@@ -3737,7 +3739,8 @@ def _try_connect(self, host):
                 # error with the peers v2 query, fallback to peers v1
                 self._uses_peers_v2 = False
                 sel_peers = self._get_peers_query(self.PeersQueryType.PEERS, connection)
-                peers_query = QueryMessage(query=sel_peers, consistency_level=ConsistencyLevel.ONE)
+                peers_query = QueryMessage(query=maybe_add_timeout_to_query(sel_peers, self._metadata_request_timeout),
+                                           consistency_level=ConsistencyLevel.ONE)
                 peers_result = connection.wait_for_response(
                     peers_query, timeout=self._timeout)
@@ -3881,8 +3884,10 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None,
         else:
             log.debug("[control connection] Refreshing node list and token map")
             sel_local = self._SELECT_LOCAL
 
-        peers_query = QueryMessage(query=sel_peers, consistency_level=cl)
-        local_query = QueryMessage(query=sel_local, consistency_level=cl)
+        peers_query = QueryMessage(query=maybe_add_timeout_to_query(sel_peers, self._metadata_request_timeout),
+                                   consistency_level=cl)
+        local_query = QueryMessage(query=maybe_add_timeout_to_query(sel_local, self._metadata_request_timeout),
+                                   consistency_level=cl)
 
         peers_result, local_result = connection.wait_for_responses(
             peers_query, local_query, timeout=self._timeout)
@@ -3937,8 +3942,9 @@ def _refresh_node_list_and_token_map(self, connection, preloaded_results=None,
             # local rpc_address has not been queried yet, try to fetch it
             # separately, which might fail because C* < 2.1.6 doesn't have rpc_address
             # in system.local. See CASSANDRA-9436.
-            local_rpc_address_query = QueryMessage(query=self._SELECT_LOCAL_NO_TOKENS_RPC_ADDRESS,
-                                                   consistency_level=ConsistencyLevel.ONE)
+            local_rpc_address_query = QueryMessage(
+                query=maybe_add_timeout_to_query(self._SELECT_LOCAL_NO_TOKENS_RPC_ADDRESS, self._metadata_request_timeout),
+                consistency_level=ConsistencyLevel.ONE)
             success, local_rpc_address_result = connection.wait_for_response(
                 local_rpc_address_query, timeout=self._timeout, fail_on_error=False)
             if success:
@@ -4173,8 +4179,10 @@ def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wai
         select_peers_query = self._get_peers_query(self.PeersQueryType.PEERS_SCHEMA, connection)
 
         while elapsed < total_timeout:
-            peers_query = QueryMessage(query=select_peers_query, consistency_level=cl)
-            local_query = QueryMessage(query=self._SELECT_SCHEMA_LOCAL, consistency_level=cl)
+            peers_query = QueryMessage(query=maybe_add_timeout_to_query(select_peers_query, self._metadata_request_timeout),
+                                       consistency_level=cl)
+            local_query = QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_SCHEMA_LOCAL, self._metadata_request_timeout),
+                                       consistency_level=cl)
             try:
                 timeout = min(self._timeout, total_timeout - elapsed)
                 peers_result, local_result = connection.wait_for_responses(
diff --git a/cassandra/metadata.py b/cassandra/metadata.py
index 34a4df127f..18d4249780 100644
--- a/cassandra/metadata.py
+++ b/cassandra/metadata.py
@@ -43,6 +43,7 @@ from cassandra.pool import HostDistance
 from cassandra.connection import EndPoint
 from cassandra.tablets import Tablets
+from cassandra.util import maybe_add_timeout_to_query
 
 log = logging.getLogger(__name__)
 
@@ -2005,7 +2006,8 @@ def _query_build_row(self, query_string, build_func):
         return result[0] if result else None
 
     def _query_build_rows(self, query_string, build_func):
-        query = QueryMessage(query=query_string, consistency_level=ConsistencyLevel.ONE, fetch_size=self.fetch_size)
+        query = QueryMessage(query=maybe_add_timeout_to_query(query_string, self.metadata_request_timeout),
+                             consistency_level=ConsistencyLevel.ONE, fetch_size=self.fetch_size)
         responses = self.connection.wait_for_responses((query), timeout=self.timeout, fail_on_error=False)
         (success, response) = responses[0]
         results = self._handle_results(success, response, expected_failures=(InvalidRequest), query_msg=query)
@@ -2105,9 +2107,18 @@ def get_all_keyspaces(self):
 
     def get_table(self, keyspaces, keyspace, table):
         cl = ConsistencyLevel.ONE
         where_clause = bind_params(" WHERE keyspace_name = %%s AND %s = %%s" % (self._table_name_col,), (keyspace, table), _encoder)
-        cf_query = QueryMessage(query=self._SELECT_COLUMN_FAMILIES + where_clause, consistency_level=cl)
-        col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl)
-        triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl)
+        cf_query = QueryMessage(
+            query=maybe_add_timeout_to_query(self._SELECT_COLUMN_FAMILIES + where_clause, self.metadata_request_timeout),
+            consistency_level=cl,
+        )
+        col_query = QueryMessage(
+            query=maybe_add_timeout_to_query(self._SELECT_COLUMNS + where_clause, self.metadata_request_timeout),
+            consistency_level=cl,
+        )
+        triggers_query = QueryMessage(
+            query=maybe_add_timeout_to_query(self._SELECT_TRIGGERS + where_clause, self.metadata_request_timeout),
+            consistency_level=cl,
+        )
         (cf_success, cf_result), (col_success, col_result), (triggers_success, triggers_result) \
             = self.connection.wait_for_responses(cf_query, col_query, triggers_query, timeout=self.timeout, fail_on_error=False)
         table_result = self._handle_results(cf_success, cf_result)
@@ -2421,13 +2432,34 @@ def _build_trigger_metadata(table_metadata, row):
 
     def _query_all(self):
         cl = ConsistencyLevel.ONE
         queries = [
-            QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl),
-            QueryMessage(query=self._SELECT_COLUMN_FAMILIES, consistency_level=cl),
-            QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl),
-            QueryMessage(query=self._SELECT_TYPES, consistency_level=cl),
-            QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl),
-            QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl),
-            QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl)
+            QueryMessage(
+                query=maybe_add_timeout_to_query(self._SELECT_KEYSPACES, self.metadata_request_timeout),
+                consistency_level=cl,
+            ),
+            QueryMessage(
+                query=maybe_add_timeout_to_query(self._SELECT_COLUMN_FAMILIES, self.metadata_request_timeout),
+                consistency_level=cl,
+            ),
+            QueryMessage(
+                query=maybe_add_timeout_to_query(self._SELECT_COLUMNS, self.metadata_request_timeout),
+                consistency_level=cl,
+            ),
+            QueryMessage(
+                query=maybe_add_timeout_to_query(self._SELECT_TYPES, self.metadata_request_timeout),
+                consistency_level=cl,
+            ),
+            QueryMessage(
+                query=maybe_add_timeout_to_query(self._SELECT_FUNCTIONS, self.metadata_request_timeout),
+                consistency_level=cl,
+            ),
+            QueryMessage(
+                query=maybe_add_timeout_to_query(self._SELECT_AGGREGATES, self.metadata_request_timeout),
+                consistency_level=cl,
+            ),
+            QueryMessage(
+                query=maybe_add_timeout_to_query(self._SELECT_TRIGGERS, self.metadata_request_timeout),
+                consistency_level=cl,
+            )
         ]
 
         ((ks_success, ks_result),
@@ -2593,16 +2625,27 @@ def get_table(self, keyspaces, keyspace, table):
         cl = ConsistencyLevel.ONE
         fetch_size = self.fetch_size
         where_clause = bind_params(" WHERE keyspace_name = %%s AND %s = %%s" % (self._table_name_col), (keyspace, table), _encoder)
-        cf_query = QueryMessage(query=self._SELECT_TABLES + where_clause, consistency_level=cl, fetch_size=fetch_size)
-        col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl, fetch_size=fetch_size)
-        indexes_query = QueryMessage(query=self._SELECT_INDEXES + where_clause, consistency_level=cl, fetch_size=fetch_size)
-        triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl, fetch_size=fetch_size)
-        scylla_query = QueryMessage(query=self._SELECT_SCYLLA + where_clause, consistency_level=cl, fetch_size=fetch_size)
+        cf_query = QueryMessage(
+            query=maybe_add_timeout_to_query(self._SELECT_TABLES + where_clause, self.metadata_request_timeout),
+            consistency_level=cl, fetch_size=fetch_size)
+        col_query = QueryMessage(
+            query=maybe_add_timeout_to_query(self._SELECT_COLUMNS + where_clause, self.metadata_request_timeout),
+            consistency_level=cl, fetch_size=fetch_size)
+        indexes_query = QueryMessage(
+            query=maybe_add_timeout_to_query(self._SELECT_INDEXES + where_clause, self.metadata_request_timeout),
+            consistency_level=cl, fetch_size=fetch_size)
+        triggers_query = QueryMessage(
+            query=maybe_add_timeout_to_query(self._SELECT_TRIGGERS + where_clause, self.metadata_request_timeout),
+            consistency_level=cl, fetch_size=fetch_size)
+        scylla_query = QueryMessage(
+            query=maybe_add_timeout_to_query(self._SELECT_SCYLLA + where_clause, self.metadata_request_timeout),
+            consistency_level=cl, fetch_size=fetch_size)
 
         # in protocol v4 we don't know if this event is a view or a table, so we look for both
         where_clause = bind_params(" WHERE keyspace_name = %s AND view_name = %s", (keyspace, table), _encoder)
-        view_query = QueryMessage(query=self._SELECT_VIEWS + where_clause,
-                                  consistency_level=cl, fetch_size=fetch_size)
+        view_query = QueryMessage(
+            query=maybe_add_timeout_to_query(self._SELECT_VIEWS + where_clause, self.metadata_request_timeout),
+            consistency_level=cl, fetch_size=fetch_size)
 
         ((cf_success, cf_result), (col_success, col_result),
          (indexes_sucess, indexes_result), (triggers_success, triggers_result),
          (view_success, view_result),
@@ -2774,16 +2817,26 @@ def _query_all(self):
         cl = ConsistencyLevel.ONE
         fetch_size = self.fetch_size
         queries = [
-            QueryMessage(query=self._SELECT_KEYSPACES, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_TABLES, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_COLUMNS, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_TYPES, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_FUNCTIONS, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_AGGREGATES, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_TRIGGERS, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_INDEXES, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_VIEWS, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_SCYLLA, fetch_size=fetch_size, consistency_level=cl)
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_KEYSPACES, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_TABLES, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_COLUMNS, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_TYPES, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_FUNCTIONS, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_AGGREGATES, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_TRIGGERS, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_INDEXES, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VIEWS, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_SCYLLA, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
         ]
 
         ((ks_success, ks_result),
@@ -2874,19 +2927,31 @@ def _query_all(self):
         fetch_size = self.fetch_size
         queries = [
             # copied from V3
-            QueryMessage(query=self._SELECT_KEYSPACES, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_TABLES, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_COLUMNS, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_TYPES, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_FUNCTIONS, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_AGGREGATES, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_TRIGGERS, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_INDEXES, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_VIEWS, fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_KEYSPACES, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_TABLES, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_COLUMNS, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_TYPES, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_FUNCTIONS, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_AGGREGATES, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_TRIGGERS, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_INDEXES, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VIEWS, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
             # V4-only queries
-            QueryMessage(query=self._SELECT_VIRTUAL_KEYSPACES, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_VIRTUAL_TABLES, fetch_size=fetch_size, consistency_level=cl),
-            QueryMessage(query=self._SELECT_VIRTUAL_COLUMNS, fetch_size=fetch_size, consistency_level=cl)
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VIRTUAL_KEYSPACES, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VIRTUAL_TABLES, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VIRTUAL_COLUMNS, self.metadata_request_timeout),
+                         fetch_size=fetch_size, consistency_level=cl),
         ]
 
         responses = self.connection.wait_for_responses(
@@ -3010,8 +3075,14 @@ def get_table(self, keyspaces, keyspace, table):
         table_meta = super(SchemaParserDSE68, self).get_table(keyspaces, keyspace, table)
         cl = ConsistencyLevel.ONE
         where_clause = bind_params(" WHERE keyspace_name = %%s AND %s = %%s" % (self._table_name_col), (keyspace, table), _encoder)
-        vertices_query = QueryMessage(query=self._SELECT_VERTICES + where_clause, consistency_level=cl)
-        edges_query = QueryMessage(query=self._SELECT_EDGES + where_clause, consistency_level=cl)
+        vertices_query = QueryMessage(
+            query=maybe_add_timeout_to_query(self._SELECT_VERTICES + where_clause, self.metadata_request_timeout),
+            consistency_level=cl,
+        )
+        edges_query = QueryMessage(
+            query=maybe_add_timeout_to_query(self._SELECT_EDGES + where_clause, self.metadata_request_timeout),
+            consistency_level=cl,
+        )
 
         (vertices_success, vertices_result), (edges_success, edges_result) \
             = self.connection.wait_for_responses(vertices_query, edges_query, timeout=self.timeout, fail_on_error=False)
 
@@ -3092,21 +3163,22 @@ def _query_all(self):
         cl = ConsistencyLevel.ONE
         queries = [
             # copied from v4
-            QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl),
-            QueryMessage(query=self._SELECT_TABLES, consistency_level=cl),
-            QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl),
-            QueryMessage(query=self._SELECT_TYPES, consistency_level=cl),
-            QueryMessage(query=self._SELECT_FUNCTIONS, consistency_level=cl),
-            QueryMessage(query=self._SELECT_AGGREGATES, consistency_level=cl),
-            QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl),
-            QueryMessage(query=self._SELECT_INDEXES, consistency_level=cl),
-            QueryMessage(query=self._SELECT_VIEWS, consistency_level=cl),
-            QueryMessage(query=self._SELECT_VIRTUAL_KEYSPACES, consistency_level=cl),
-            QueryMessage(query=self._SELECT_VIRTUAL_TABLES, consistency_level=cl),
-            QueryMessage(query=self._SELECT_VIRTUAL_COLUMNS, consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_KEYSPACES, self.metadata_request_timeout),
+                         consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_TABLES, self.metadata_request_timeout), consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_COLUMNS, self.metadata_request_timeout), consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_TYPES, self.metadata_request_timeout), consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_FUNCTIONS, self.metadata_request_timeout), consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_AGGREGATES, self.metadata_request_timeout), consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_TRIGGERS, self.metadata_request_timeout), consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_INDEXES, self.metadata_request_timeout), consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VIEWS, self.metadata_request_timeout), consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VIRTUAL_KEYSPACES, self.metadata_request_timeout), consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VIRTUAL_TABLES, self.metadata_request_timeout), consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VIRTUAL_COLUMNS, self.metadata_request_timeout), consistency_level=cl),
             # dse6.8 only
-            QueryMessage(query=self._SELECT_VERTICES, consistency_level=cl),
-            QueryMessage(query=self._SELECT_EDGES, consistency_level=cl)
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_VERTICES, self.metadata_request_timeout), consistency_level=cl),
+            QueryMessage(query=maybe_add_timeout_to_query(self._SELECT_EDGES, self.metadata_request_timeout), consistency_level=cl)
         ]
 
         responses = self.connection.wait_for_responses(
diff --git a/cassandra/query.py b/cassandra/query.py
index bd8ccd888d..9ad5a3230d 100644
--- a/cassandra/query.py
+++ b/cassandra/query.py
@@ -26,7 +26,7 @@ import warnings
 
 from cassandra import ConsistencyLevel, OperationTimedOut
-from cassandra.util import unix_time_from_uuid1
+from cassandra.util import unix_time_from_uuid1, maybe_add_timeout_to_query
 from cassandra.encoder import Encoder
 import cassandra.encoder
 from cassandra.protocol import _UNSET_VALUE
@@ -998,8 +998,9 @@ def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None):
                         "Trace information was not available within %f seconds. Consider raising Session.max_trace_wait." % (max_wait,))
 
                 log.debug("Attempting to fetch trace info for trace ID: %s", self.trace_id)
+                metadata_request_timeout = self._session.cluster.control_connection and self._session.cluster.control_connection._metadata_request_timeout
                 session_results = self._execute(
-                    SimpleStatement(self._SELECT_SESSIONS_FORMAT, consistency_level=query_cl), (self.trace_id,), time_spent, max_wait)
+                    SimpleStatement(maybe_add_timeout_to_query(self._SELECT_SESSIONS_FORMAT, metadata_request_timeout), consistency_level=query_cl), (self.trace_id,), time_spent, max_wait)
 
                 # PYTHON-730: There is race condition that the duration mutation is written before started_at the for fast queries
                 session_row = session_results.one() if session_results else None
@@ -1024,7 +1025,11 @@ def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None):
                 log.debug("Attempting to fetch trace events for trace ID: %s", self.trace_id)
                 time_spent = time.time() - start
                 event_results = self._execute(
-                    SimpleStatement(self._SELECT_EVENTS_FORMAT, consistency_level=query_cl), (self.trace_id,), time_spent, max_wait)
+                    SimpleStatement(maybe_add_timeout_to_query(self._SELECT_EVENTS_FORMAT, metadata_request_timeout),
+                                    consistency_level=query_cl),
+                    (self.trace_id,),
+                    time_spent,
+                    max_wait)
                 log.debug("Fetched trace events for trace ID: %s", self.trace_id)
 
                 self.events = tuple(TraceEvent(r.activity, r.event_id, r.source, r.source_elapsed, r.thread)
                                     for r in event_results)
diff --git a/cassandra/util.py b/cassandra/util.py
index 06d338f2e1..c6e2f0eda9 100644
--- a/cassandra/util.py
+++ b/cassandra/util.py
@@ -29,6 +29,7 @@ import sys
 import time
 import uuid
+from typing import Optional
 
 _HAS_GEOMET = True
 try:
@@ -1801,3 +1802,12 @@ def __gt__(self, other):
             (is_major_ge and is_minor_ge and is_patch_ge and is_build_gt) or
             (is_major_ge and is_minor_ge and is_patch_ge and is_build_ge and is_prerelease_gt)
         )
+
+
+def maybe_add_timeout_to_query(stmt: str, metadata_request_timeout: Optional[datetime.timedelta]) -> str:
+    if metadata_request_timeout is None:
+        return stmt
+    ms = int(metadata_request_timeout / datetime.timedelta(milliseconds=1))
+    if ms == 0:
+        return stmt
+    return f"{stmt} USING TIMEOUT {ms}ms"

From 7a4ae44c480bb6c9e6de2093549fd86a5b02256b Mon Sep 17 00:00:00 2001
From: Dmitry Kropachev
Date: Fri, 27 Sep 2024 16:13:02 -0400
Subject: [PATCH 270/551] Test metadata_request_timeout configuration option

---
 tests/integration/standard/test_cluster.py  |  2 +-
 tests/integration/standard/test_metadata.py | 34 +++++++++++++++
 tests/unit/advanced/test_metadata.py        | 46 ++++++++++++++++++++-
 tests/unit/test_util_types.py               | 14 ++++++-
 4 files changed, 92 insertions(+), 4 deletions(-)

diff --git a/tests/integration/standard/test_cluster.py b/tests/integration/standard/test_cluster.py
index 43356dbd82..e506596bf7 100644
--- a/tests/integration/standard/test_cluster.py
+++ b/tests/integration/standard/test_cluster.py
@@ -522,7 +522,7 @@ def test_refresh_schema_no_wait(self):
         def patched_wait_for_responses(*args, **kwargs):
             # When selecting schema version, replace the real schema UUID with an unexpected UUID
             response = original_wait_for_responses(*args, **kwargs)
-            if len(args) > 2 and hasattr(args[2], "query") and args[2].query == "SELECT schema_version FROM system.local WHERE key='local'":
+            if len(args) > 2 and hasattr(args[2], "query") and "SELECT schema_version FROM system.local WHERE key='local'" in args[2].query:
                 new_uuid = uuid4()
                 response[1].parsed_rows[0] = (new_uuid,)
             return response
diff --git a/tests/integration/standard/test_metadata.py b/tests/integration/standard/test_metadata.py
index 8fc50ce89e..944dd8ab20 100644
--- a/tests/integration/standard/test_metadata.py
+++ b/tests/integration/standard/test_metadata.py
@@ -25,11 +25,13 @@ import pytest
 
 from cassandra import AlreadyExists, SignatureDescriptor, UserFunctionDescriptor, UserAggregateDescriptor
+from cassandra.connection import Connection
 
 from cassandra.encoder import Encoder
 from cassandra.metadata import (IndexMetadata, Token, murmur3, Function, Aggregate, protect_name, protect_names,
                                 RegisteredTableExtension, _RegisteredExtensionType, get_schema_parser,
                                 group_keys_by_replica, NO_VALID_REPLICA)
+from cassandra.protocol import QueryMessage, ProtocolHandler
 from cassandra.util import SortedSet
 
 from tests.integration import (get_cluster, use_singledc, PROTOCOL_VERSION, execute_until_pass,
@@ -1331,6 +1333,38 @@ def test_token(self):
         cluster.shutdown()
 
 
+class MetadataTimeoutTest(unittest.TestCase):
+    """
+    Test that metadata_request_timeout is applied to the driver's internal queries.
+    """
+    def test_timeout(self):
+        cluster = TestCluster()
+        cluster.metadata_request_timeout = None
+
+        stmts = []
+
+        class ConnectionWrapper(cluster.connection_class):
+            def __init__(self, *args, **kwargs):
+                super(ConnectionWrapper, self).__init__(*args, **kwargs)
+
+            def send_msg(self, msg, request_id, cb, encoder=ProtocolHandler.encode_message,
+                         decoder=ProtocolHandler.decode_message, result_metadata=None):
+                if isinstance(msg, QueryMessage):
+                    stmts.append(msg.query)
+                return super(ConnectionWrapper, self).send_msg(msg, request_id, cb, encoder, decoder, result_metadata)
+
+        cluster.connection_class = ConnectionWrapper
+        s = cluster.connect()
+        s.execute('SELECT now() FROM system.local')
+        s.shutdown()
+
+        for stmt in stmts:
+            if "SELECT now() FROM system.local" in stmt:
+                continue
+            if "USING TIMEOUT 2000ms" not in stmt:
+                self.fail(f"query `{stmt}` does not contain `USING TIMEOUT 2000ms`")
+
+
 class KeyspaceAlterMetadata(unittest.TestCase):
     """
     Test verifies that table metadata is preserved on keyspace alter
diff --git a/tests/unit/advanced/test_metadata.py b/tests/unit/advanced/test_metadata.py
index cf730ebec5..20f80b4da4 100644
--- a/tests/unit/advanced/test_metadata.py
+++ b/tests/unit/advanced/test_metadata.py
@@ -11,13 +11,14 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+import datetime
 import unittest
 
 from cassandra.metadata import (
     KeyspaceMetadata, TableMetadataDSE68,
-    VertexMetadata, EdgeMetadata
+    VertexMetadata, EdgeMetadata, SchemaParserV22, _SchemaParser
 )
+from cassandra.protocol import ResultMessage, RESULT_KIND_ROWS
 
 
 class GraphMetadataToCQLTests(unittest.TestCase):
@@ -136,3 +137,44 @@ def test_edge_multiple_partition_and_clustering_keys(self):
             'FROM from_label((pk1, pk2), c1, c2) ',
             tm.as_cql_query()
         )
+
+
+class SchemaParsersTests(unittest.TestCase):
+    def test_metadata_query_metadata_timeout(self):
+        class FakeConnection:
+            def __init__(self):
+                self.queries = []
+
+            def wait_for_responses(self, *msgs, **kwargs):
+                self.queries.extend(msgs)
+                local_response = ResultMessage(kind=RESULT_KIND_ROWS)
+                local_response.column_names = []
+                local_response.parsed_rows = []
+
+                return [[local_response, local_response] for _ in msgs]
+
+        for schemaClass in get_all_schema_parser_classes(_SchemaParser):
+            conn = FakeConnection()
+            p = schemaClass(conn, 2.0, 1000, None)
+            p._query_all()
+
+            for q in conn.queries:
+                if "USING TIMEOUT" in q.query:
+                    self.fail(f"<{schemaClass.__name__}> query `{q.query}` contains `USING TIMEOUT`, while should not")
+
+            conn = FakeConnection()
+            p = schemaClass(conn, 2.0, 1000, datetime.timedelta(seconds=2))
+            p._query_all()
+
+            for q in conn.queries:
+                if "USING TIMEOUT 2000ms" not in q.query:
+                    self.fail(f"{schemaClass.__name__} query `{q.query}` does not contain `USING TIMEOUT 2000ms`")
+
+
+def get_all_schema_parser_classes(cl):
+    for child in cl.__subclasses__():
+        if not child.__name__.startswith('SchemaParser') or child.__module__ != 'cassandra.metadata':
+            continue
+        yield child
+        for c in get_all_schema_parser_classes(child):
+            yield c
diff --git a/tests/unit/test_util_types.py b/tests/unit/test_util_types.py
index 5d6058b394..a2551ba20b 100644
--- a/tests/unit/test_util_types.py
+++ b/tests/unit/test_util_types.py
@@ -15,7 +15,7 @@
 
 import datetime
 
-from cassandra.util import Date, Time, Duration, Version
+from cassandra.util import Date, Time, Duration, Version, maybe_add_timeout_to_query
 
 
 class DateTests(unittest.TestCase):
@@ -287,3 +287,15 @@ def test_version_compare(self):
         self.assertTrue(Version('4.0-SNAPSHOT2') > Version('4.0.0-SNAPSHOT1'))
 
         self.assertTrue(Version('4.0.0-alpha1-SNAPSHOT') > Version('4.0.0-SNAPSHOT'))
+
+
+class FunctionTests(unittest.TestCase):
+    def test_maybe_add_timeout_to_query(self):
+        self.assertEqual(
+            "SELECT * FROM HOSTS",
+            maybe_add_timeout_to_query("SELECT * FROM HOSTS", None)
+        )
+        self.assertEqual(
+            "SELECT * FROM HOSTS USING TIMEOUT 1000ms",
+            maybe_add_timeout_to_query("SELECT * FROM HOSTS", datetime.timedelta(seconds=1))
+        )

From b95e1a0e7bfbc1efaecb91e97e15bd200f44de3d Mon Sep 17 00:00:00 2001
From: David Garcia
Date: Tue, 29 Oct 2024 06:56:57 +0000
Subject: [PATCH 271/551] docs: update theme 1.8.3

---
 .github/dependabot.yml |    2 -
 docs/poetry.lock       | 1145 ++++++++++++++++++++++------------
 docs/pyproject.toml    |    1 +
 3 files changed, 622 insertions(+), 526 deletions(-)

diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 7811ce0305..28784749c4 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -4,8 +4,6 @@ updates:
     directory: "/docs"
     schedule:
       interval: "daily"
-    ignore:
-      - dependency-name: "*"
     allow:
       - dependency-name: "sphinx-scylladb-theme"
      - dependency-name: "sphinx-multiversion-scylla"
diff --git a/docs/poetry.lock b/docs/poetry.lock
index 4bb20a14e5..d325c568eb 100644
--- a/docs/poetry.lock
+++ b/docs/poetry.lock
@@ -25,13 +25,13 @@ files = [
 
 [[package]]
 name = "anyio"
-version = "4.6.0"
+version = "4.6.2.post1"
 description = "High level compatibility layer for multiple asynchronous event loop implementations"
 optional = false
 python-versions = ">=3.9"
 files = [
-    {file = "anyio-4.6.0-py3-none-any.whl", hash = "sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a"},
-    {file = "anyio-4.6.0.tar.gz", hash = "sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb"},
+    {file = "anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d"},
+    {file = "anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c"},
 ]
 
 [package.dependencies]
@@ -42,23 +42,20 @@ typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
 
 [package.extras]
 doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
-test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"]
+test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"]
 trio = ["trio (>=0.26.1)"]
 
 [[package]]
 name = "babel"
-version = "2.13.1"
+version = "2.16.0"
 description = "Internationalization utilities"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "Babel-2.13.1-py3-none-any.whl", hash = "sha256:7077a4984b02b6727ac10f1f7294484f737443d7e2e66c5e4380e41a3ae0b4ed"},
-    {file = "Babel-2.13.1.tar.gz", hash = "sha256:33e0952d7dd6374af8dbf6768cc4ddf3ccfefc244f9986d4074704f2fbd18900"},
+    {file = "babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b"},
+    {file = "babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"},
 ]
-[package.dependencies] -setuptools = {version = "*", markers = "python_version >= \"3.12\""} - [package.extras] dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] @@ -85,74 +82,89 @@ lxml = ["lxml"] [[package]] name = "certifi" -version = "2023.7.22" +version = "2024.8.30" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, - {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, + {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, + {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, ] [[package]] name = "cffi" -version = "1.16.0" +version = "1.17.1" description = "Foreign Function Interface for Python calling C code." optional = false python-versions = ">=3.8" files = [ - {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"}, - {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"}, - {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"}, - {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"}, - {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"}, - {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"}, - {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"}, - {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", 
hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"}, - {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"}, - {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"}, - {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"}, - {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"}, - {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"}, - {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"}, - {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"}, - {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"}, - {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"}, - {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"}, - {file = 
"cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"}, - {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"}, - {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"}, - {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"}, - {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"}, - {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"}, - {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"}, - {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"}, - {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = 
"cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, ] [package.dependencies] @@ -160,101 +172,116 @@ pycparser = "*" [[package]] name = "charset-normalizer" -version = "3.3.1" +version = "3.4.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false python-versions = ">=3.7.0" files = [ - {file = "charset-normalizer-3.3.1.tar.gz", hash = "sha256:d9137a876020661972ca6eec0766d81aef8a5627df628b664b234b73396e727e"}, - {file = "charset_normalizer-3.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8aee051c89e13565c6bd366813c386939f8e928af93c29fda4af86d25b73d8f8"}, - {file = "charset_normalizer-3.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:352a88c3df0d1fa886562384b86f9a9e27563d4704ee0e9d56ec6fcd270ea690"}, - {file = "charset_normalizer-3.3.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:223b4d54561c01048f657fa6ce41461d5ad8ff128b9678cfe8b2ecd951e3f8a2"}, - {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f861d94c2a450b974b86093c6c027888627b8082f1299dfd5a4bae8e2292821"}, - {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1171ef1fc5ab4693c5d151ae0fdad7f7349920eabbaca6271f95969fa0756c2d"}, - {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28f512b9a33235545fbbdac6a330a510b63be278a50071a336afc1b78781b147"}, - {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0e842112fe3f1a4ffcf64b06dc4c61a88441c2f02f373367f7b4c1aa9be2ad5"}, - {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f9bc2ce123637a60ebe819f9fccc614da1bcc05798bbbaf2dd4ec91f3e08846"}, - {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f194cce575e59ffe442c10a360182a986535fd90b57f7debfaa5c845c409ecc3"}, - {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9a74041ba0bfa9bc9b9bb2cd3238a6ab3b7618e759b41bd15b5f6ad958d17605"}, - {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b578cbe580e3b41ad17b1c428f382c814b32a6ce90f2d8e39e2e635d49e498d1"}, - {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:6db3cfb9b4fcecb4390db154e75b49578c87a3b9979b40cdf90d7e4b945656e1"}, - {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:debb633f3f7856f95ad957d9b9c781f8e2c6303ef21724ec94bea2ce2fcbd056"}, - {file = "charset_normalizer-3.3.1-cp310-cp310-win32.whl", hash = "sha256:87071618d3d8ec8b186d53cb6e66955ef2a0e4fa63ccd3709c0c90ac5a43520f"}, - {file = "charset_normalizer-3.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:e372d7dfd154009142631de2d316adad3cc1c36c32a38b16a4751ba78da2a397"}, - {file = "charset_normalizer-3.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ae4070f741f8d809075ef697877fd350ecf0b7c5837ed68738607ee0a2c572cf"}, - {file = "charset_normalizer-3.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:58e875eb7016fd014c0eea46c6fa92b87b62c0cb31b9feae25cbbe62c919f54d"}, - {file = "charset_normalizer-3.3.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dbd95e300367aa0827496fe75a1766d198d34385a58f97683fe6e07f89ca3e3c"}, - {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de0b4caa1c8a21394e8ce971997614a17648f94e1cd0640fbd6b4d14cab13a72"}, - {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:985c7965f62f6f32bf432e2681173db41336a9c2611693247069288bcb0c7f8b"}, - {file = 
"charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a15c1fe6d26e83fd2e5972425a772cca158eae58b05d4a25a4e474c221053e2d"}, - {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae55d592b02c4349525b6ed8f74c692509e5adffa842e582c0f861751701a673"}, - {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be4d9c2770044a59715eb57c1144dedea7c5d5ae80c68fb9959515037cde2008"}, - {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:851cf693fb3aaef71031237cd68699dded198657ec1e76a76eb8be58c03a5d1f"}, - {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:31bbaba7218904d2eabecf4feec0d07469284e952a27400f23b6628439439fa7"}, - {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:871d045d6ccc181fd863a3cd66ee8e395523ebfbc57f85f91f035f50cee8e3d4"}, - {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:501adc5eb6cd5f40a6f77fbd90e5ab915c8fd6e8c614af2db5561e16c600d6f3"}, - {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f5fb672c396d826ca16a022ac04c9dce74e00a1c344f6ad1a0fdc1ba1f332213"}, - {file = "charset_normalizer-3.3.1-cp311-cp311-win32.whl", hash = "sha256:bb06098d019766ca16fc915ecaa455c1f1cd594204e7f840cd6258237b5079a8"}, - {file = "charset_normalizer-3.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:8af5a8917b8af42295e86b64903156b4f110a30dca5f3b5aedea123fbd638bff"}, - {file = "charset_normalizer-3.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7ae8e5142dcc7a49168f4055255dbcced01dc1714a90a21f87448dc8d90617d1"}, - {file = "charset_normalizer-3.3.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5b70bab78accbc672f50e878a5b73ca692f45f5b5e25c8066d748c09405e6a55"}, - {file = "charset_normalizer-3.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5ceca5876032362ae73b83347be8b5dbd2d1faf3358deb38c9c88776779b2e2f"}, - {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34d95638ff3613849f473afc33f65c401a89f3b9528d0d213c7037c398a51296"}, - {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9edbe6a5bf8b56a4a84533ba2b2f489d0046e755c29616ef8830f9e7d9cf5728"}, - {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6a02a3c7950cafaadcd46a226ad9e12fc9744652cc69f9e5534f98b47f3bbcf"}, - {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10b8dd31e10f32410751b3430996f9807fc4d1587ca69772e2aa940a82ab571a"}, - {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edc0202099ea1d82844316604e17d2b175044f9bcb6b398aab781eba957224bd"}, - {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b891a2f68e09c5ef989007fac11476ed33c5c9994449a4e2c3386529d703dc8b"}, - {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:71ef3b9be10070360f289aea4838c784f8b851be3ba58cf796262b57775c2f14"}, - {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:55602981b2dbf8184c098bc10287e8c245e351cd4fdcad050bd7199d5a8bf514"}, - {file = 
"charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:46fb9970aa5eeca547d7aa0de5d4b124a288b42eaefac677bde805013c95725c"}, - {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:520b7a142d2524f999447b3a0cf95115df81c4f33003c51a6ab637cbda9d0bf4"}, - {file = "charset_normalizer-3.3.1-cp312-cp312-win32.whl", hash = "sha256:8ec8ef42c6cd5856a7613dcd1eaf21e5573b2185263d87d27c8edcae33b62a61"}, - {file = "charset_normalizer-3.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:baec8148d6b8bd5cee1ae138ba658c71f5b03e0d69d5907703e3e1df96db5e41"}, - {file = "charset_normalizer-3.3.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:63a6f59e2d01310f754c270e4a257426fe5a591dc487f1983b3bbe793cf6bac6"}, - {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d6bfc32a68bc0933819cfdfe45f9abc3cae3877e1d90aac7259d57e6e0f85b1"}, - {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4f3100d86dcd03c03f7e9c3fdb23d92e32abbca07e7c13ebd7ddfbcb06f5991f"}, - {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39b70a6f88eebe239fa775190796d55a33cfb6d36b9ffdd37843f7c4c1b5dc67"}, - {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e12f8ee80aa35e746230a2af83e81bd6b52daa92a8afaef4fea4a2ce9b9f4fa"}, - {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b6cefa579e1237ce198619b76eaa148b71894fb0d6bcf9024460f9bf30fd228"}, - {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:61f1e3fb621f5420523abb71f5771a204b33c21d31e7d9d86881b2cffe92c47c"}, - {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4f6e2a839f83a6a76854d12dbebde50e4b1afa63e27761549d006fa53e9aa80e"}, - {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:1ec937546cad86d0dce5396748bf392bb7b62a9eeb8c66efac60e947697f0e58"}, - {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:82ca51ff0fc5b641a2d4e1cc8c5ff108699b7a56d7f3ad6f6da9dbb6f0145b48"}, - {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:633968254f8d421e70f91c6ebe71ed0ab140220469cf87a9857e21c16687c034"}, - {file = "charset_normalizer-3.3.1-cp37-cp37m-win32.whl", hash = "sha256:c0c72d34e7de5604df0fde3644cc079feee5e55464967d10b24b1de268deceb9"}, - {file = "charset_normalizer-3.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:63accd11149c0f9a99e3bc095bbdb5a464862d77a7e309ad5938fbc8721235ae"}, - {file = "charset_normalizer-3.3.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5a3580a4fdc4ac05f9e53c57f965e3594b2f99796231380adb2baaab96e22761"}, - {file = "charset_normalizer-3.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2465aa50c9299d615d757c1c888bc6fef384b7c4aec81c05a0172b4400f98557"}, - {file = "charset_normalizer-3.3.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb7cd68814308aade9d0c93c5bd2ade9f9441666f8ba5aa9c2d4b389cb5e2a45"}, - {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91e43805ccafa0a91831f9cd5443aa34528c0c3f2cc48c4cb3d9a7721053874b"}, - {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:854cc74367180beb327ab9d00f964f6d91da06450b0855cbbb09187bcdb02de5"}, - {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c15070ebf11b8b7fd1bfff7217e9324963c82dbdf6182ff7050519e350e7ad9f"}, - {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c4c99f98fc3a1835af8179dcc9013f93594d0670e2fa80c83aa36346ee763d2"}, - {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3fb765362688821404ad6cf86772fc54993ec11577cd5a92ac44b4c2ba52155b"}, - {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dced27917823df984fe0c80a5c4ad75cf58df0fbfae890bc08004cd3888922a2"}, - {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a66bcdf19c1a523e41b8e9d53d0cedbfbac2e93c649a2e9502cb26c014d0980c"}, - {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ecd26be9f112c4f96718290c10f4caea6cc798459a3a76636b817a0ed7874e42"}, - {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:3f70fd716855cd3b855316b226a1ac8bdb3caf4f7ea96edcccc6f484217c9597"}, - {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:17a866d61259c7de1bdadef418a37755050ddb4b922df8b356503234fff7932c"}, - {file = "charset_normalizer-3.3.1-cp38-cp38-win32.whl", hash = "sha256:548eefad783ed787b38cb6f9a574bd8664468cc76d1538215d510a3cd41406cb"}, - {file = "charset_normalizer-3.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:45f053a0ece92c734d874861ffe6e3cc92150e32136dd59ab1fb070575189c97"}, - {file = "charset_normalizer-3.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bc791ec3fd0c4309a753f95bb6c749ef0d8ea3aea91f07ee1cf06b7b02118f2f"}, - {file = "charset_normalizer-3.3.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0c8c61fb505c7dad1d251c284e712d4e0372cef3b067f7ddf82a7fa82e1e9a93"}, - {file = "charset_normalizer-3.3.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2c092be3885a1b7899cd85ce24acedc1034199d6fca1483fa2c3a35c86e43041"}, - {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2000c54c395d9e5e44c99dc7c20a64dc371f777faf8bae4919ad3e99ce5253e"}, - {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4cb50a0335382aac15c31b61d8531bc9bb657cfd848b1d7158009472189f3d62"}, - {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c30187840d36d0ba2893bc3271a36a517a717f9fd383a98e2697ee890a37c273"}, - {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe81b35c33772e56f4b6cf62cf4aedc1762ef7162a31e6ac7fe5e40d0149eb67"}, - {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0bf89afcbcf4d1bb2652f6580e5e55a840fdf87384f6063c4a4f0c95e378656"}, - {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:06cf46bdff72f58645434d467bf5228080801298fbba19fe268a01b4534467f5"}, - {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:3c66df3f41abee950d6638adc7eac4730a306b022570f71dd0bd6ba53503ab57"}, - {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = 
"sha256:cd805513198304026bd379d1d516afbf6c3c13f4382134a2c526b8b854da1c2e"}, - {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:9505dc359edb6a330efcd2be825fdb73ee3e628d9010597aa1aee5aa63442e97"}, - {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:31445f38053476a0c4e6d12b047b08ced81e2c7c712e5a1ad97bc913256f91b2"}, - {file = "charset_normalizer-3.3.1-cp39-cp39-win32.whl", hash = "sha256:bd28b31730f0e982ace8663d108e01199098432a30a4c410d06fe08fdb9e93f4"}, - {file = "charset_normalizer-3.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:555fe186da0068d3354cdf4bbcbc609b0ecae4d04c921cc13e209eece7720727"}, - {file = "charset_normalizer-3.3.1-py3-none-any.whl", hash = "sha256:800561453acdecedaac137bf09cd719c7a440b6800ec182f077bb8e7025fb708"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, + {file = 
"charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = 
"sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, + {file = 
"charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, + {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, + {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, ] [[package]] @@ -298,32 +325,33 @@ test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"] [[package]] name = "dnspython" -version = "2.4.2" +version = "2.7.0" description = "DNS toolkit" optional = false -python-versions = ">=3.8,<4.0" +python-versions = ">=3.9" files = [ - {file = "dnspython-2.4.2-py3-none-any.whl", hash = "sha256:57c6fbaaeaaf39c891292012060beb141791735dbb4004798328fc2c467402d8"}, - {file = "dnspython-2.4.2.tar.gz", hash = "sha256:8dcfae8c7460a2f84b4072e26f1c9f4101ca20c071649cb7c34e8b6a93d58984"}, + {file = "dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86"}, + {file = "dnspython-2.7.0.tar.gz", hash = "sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1"}, ] [package.extras] -dnssec = ["cryptography (>=2.6,<42.0)"] -doh = ["h2 (>=4.1.0)", "httpcore (>=0.17.3)", "httpx (>=0.24.1)"] -doq = ["aioquic (>=0.9.20)"] -idna = ["idna (>=2.1,<4.0)"] -trio = ["trio (>=0.14,<0.23)"] -wmi = ["wmi (>=1.5.1,<2.0.0)"] +dev = ["black (>=23.1.0)", "coverage (>=7.0)", "flake8 (>=7)", "hypercorn (>=0.16.0)", "mypy (>=1.8)", "pylint (>=3)", "pytest (>=7.4)", "pytest-cov (>=4.1.0)", "quart-trio (>=0.11.0)", "sphinx (>=7.2.0)", "sphinx-rtd-theme (>=2.0.0)", "twine (>=4.0.0)", "wheel (>=0.42.0)"] +dnssec = ["cryptography (>=43)"] +doh = ["h2 (>=4.1.0)", "httpcore (>=1.0.0)", "httpx (>=0.26.0)"] +doq = ["aioquic (>=1.0.0)"] +idna = ["idna (>=3.7)"] +trio = ["trio (>=0.23)"] +wmi = ["wmi (>=1.5.1)"] [[package]] name = "docutils" -version = "0.18.1" +version = "0.21.2" description = "Docutils -- Python Documentation Utilities" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.9" files = [ - {file = "docutils-0.18.1-py2.py3-none-any.whl", hash = "sha256:23010f129180089fbcd3bc08cfefccb3b890b0050e1ca00c867036e9d161b98c"}, - {file = "docutils-0.18.1.tar.gz", hash = "sha256:679987caf361a7539d76e584cbeddc311e3aee937877c87346f31debc63e9d06"}, + {file = "docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2"}, + {file = "docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f"}, ] [[package]] @@ -449,72 +477,88 @@ test = ["cffi (>=1.12.2)", "coverage (>=5.0)", "dnspython (>=1.16.0,<2.0)", "idn [[package]] name = "greenlet" -version = "3.0.1" +version = "3.1.1" description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.7" files = [ - {file = "greenlet-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f89e21afe925fcfa655965ca8ea10f24773a1791400989ff32f467badfe4a064"}, - {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28e89e232c7593d33cac35425b58950789962011cc274aa43ef8865f2e11f46d"}, - {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b8ba29306c5de7717b5761b9ea74f9c72b9e2b834e24aa984da99cbfc70157fd"}, - {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19bbdf1cce0346ef7341705d71e2ecf6f41a35c311137f29b8a2dc2341374565"}, - {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:599daf06ea59bfedbec564b1692b0166a0045f32b6f0933b0dd4df59a854caf2"}, - {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b641161c302efbb860ae6b081f406839a8b7d5573f20a455539823802c655f63"}, - {file = "greenlet-3.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d57e20ba591727da0c230ab2c3f200ac9d6d333860d85348816e1dca4cc4792e"}, - {file = "greenlet-3.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5805e71e5b570d490938d55552f5a9e10f477c19400c38bf1d5190d760691846"}, - {file = "greenlet-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:52e93b28db27ae7d208748f45d2db8a7b6a380e0d703f099c949d0f0d80b70e9"}, - {file = "greenlet-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f7bfb769f7efa0eefcd039dd19d843a4fbfbac52f1878b1da2ed5793ec9b1a65"}, - {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91e6c7db42638dc45cf2e13c73be16bf83179f7859b07cfc139518941320be96"}, - {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1757936efea16e3f03db20efd0cd50a1c86b06734f9f7338a90c4ba85ec2ad5a"}, - {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19075157a10055759066854a973b3d1325d964d498a805bb68a1f9af4aaef8ec"}, - {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9d21aaa84557d64209af04ff48e0ad5e28c5cca67ce43444e939579d085da72"}, - {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2847e5d7beedb8d614186962c3d774d40d3374d580d2cbdab7f184580a39d234"}, - {file = "greenlet-3.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:97e7ac860d64e2dcba5c5944cfc8fa9ea185cd84061c623536154d5a89237884"}, - {file = "greenlet-3.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b2c02d2ad98116e914d4f3155ffc905fd0c025d901ead3f6ed07385e19122c94"}, - {file = "greenlet-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:22f79120a24aeeae2b4471c711dcf4f8c736a2bb2fabad2a67ac9a55ea72523c"}, - {file = "greenlet-3.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:100f78a29707ca1525ea47388cec8a049405147719f47ebf3895e7509c6446aa"}, - {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60d5772e8195f4e9ebf74046a9121bbb90090f6550f81d8956a05387ba139353"}, - {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:daa7197b43c707462f06d2c693ffdbb5991cbb8b80b5b984007de431493a319c"}, - {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea6b8aa9e08eea388c5f7a276fabb1d4b6b9d6e4ceb12cc477c3d352001768a9"}, - {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d11ebbd679e927593978aa44c10fc2092bc454b7d13fdc958d3e9d508aba7d0"}, - {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dbd4c177afb8a8d9ba348d925b0b67246147af806f0b104af4d24f144d461cd5"}, - {file = "greenlet-3.0.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:20107edf7c2c3644c67c12205dc60b1bb11d26b2610b276f97d666110d1b511d"}, - {file = "greenlet-3.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8bef097455dea90ffe855286926ae02d8faa335ed8e4067326257cb571fc1445"}, - {file = "greenlet-3.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:b2d3337dcfaa99698aa2377c81c9ca72fcd89c07e7eb62ece3f23a3fe89b2ce4"}, - {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80ac992f25d10aaebe1ee15df45ca0d7571d0f70b645c08ec68733fb7a020206"}, - {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:337322096d92808f76ad26061a8f5fccb22b0809bea39212cd6c406f6a7060d2"}, - {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9934adbd0f6e476f0ecff3c94626529f344f57b38c9a541f87098710b18af0a"}, - {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc4d815b794fd8868c4d67602692c21bf5293a75e4b607bb92a11e821e2b859a"}, - {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41bdeeb552d814bcd7fb52172b304898a35818107cc8778b5101423c9017b3de"}, - {file = "greenlet-3.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:6e6061bf1e9565c29002e3c601cf68569c450be7fc3f7336671af7ddb4657166"}, - {file = "greenlet-3.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:fa24255ae3c0ab67e613556375a4341af04a084bd58764731972bcbc8baeba36"}, - {file = "greenlet-3.0.1-cp37-cp37m-win32.whl", hash = "sha256:b489c36d1327868d207002391f662a1d163bdc8daf10ab2e5f6e41b9b96de3b1"}, - {file = "greenlet-3.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:f33f3258aae89da191c6ebaa3bc517c6c4cbc9b9f689e5d8452f7aedbb913fa8"}, - {file = "greenlet-3.0.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:d2905ce1df400360463c772b55d8e2518d0e488a87cdea13dd2c71dcb2a1fa16"}, - {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a02d259510b3630f330c86557331a3b0e0c79dac3d166e449a39363beaae174"}, - {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55d62807f1c5a1682075c62436702aaba941daa316e9161e4b6ccebbbf38bda3"}, - {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3fcc780ae8edbb1d050d920ab44790201f027d59fdbd21362340a85c79066a74"}, - {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4eddd98afc726f8aee1948858aed9e6feeb1758889dfd869072d4465973f6bfd"}, - {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:eabe7090db68c981fca689299c2d116400b553f4b713266b130cfc9e2aa9c5a9"}, - {file = "greenlet-3.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f2f6d303f3dee132b322a14cd8765287b8f86cdc10d2cb6a6fae234ea488888e"}, - {file = "greenlet-3.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d923ff276f1c1f9680d32832f8d6c040fe9306cbfb5d161b0911e9634be9ef0a"}, - {file = "greenlet-3.0.1-cp38-cp38-win32.whl", hash = "sha256:0b6f9f8ca7093fd4433472fd99b5650f8a26dcd8ba410e14094c1e44cd3ceddd"}, - {file = "greenlet-3.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:990066bff27c4fcf3b69382b86f4c99b3652bab2a7e685d968cd4d0cfc6f67c6"}, - {file = "greenlet-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ce85c43ae54845272f6f9cd8320d034d7a946e9773c693b27d620edec825e376"}, - {file = 
"greenlet-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89ee2e967bd7ff85d84a2de09df10e021c9b38c7d91dead95b406ed6350c6997"}, - {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87c8ceb0cf8a5a51b8008b643844b7f4a8264a2c13fcbcd8a8316161725383fe"}, - {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d6a8c9d4f8692917a3dc7eb25a6fb337bff86909febe2f793ec1928cd97bedfc"}, - {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fbc5b8f3dfe24784cee8ce0be3da2d8a79e46a276593db6868382d9c50d97b1"}, - {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85d2b77e7c9382f004b41d9c72c85537fac834fb141b0296942d52bf03fe4a3d"}, - {file = "greenlet-3.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:696d8e7d82398e810f2b3622b24e87906763b6ebfd90e361e88eb85b0e554dc8"}, - {file = "greenlet-3.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:329c5a2e5a0ee942f2992c5e3ff40be03e75f745f48847f118a3cfece7a28546"}, - {file = "greenlet-3.0.1-cp39-cp39-win32.whl", hash = "sha256:cf868e08690cb89360eebc73ba4be7fb461cfbc6168dd88e2fbbe6f31812cd57"}, - {file = "greenlet-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:ac4a39d1abae48184d420aa8e5e63efd1b75c8444dd95daa3e03f6c6310e9619"}, - {file = "greenlet-3.0.1.tar.gz", hash = "sha256:816bd9488a94cba78d93e1abb58000e8266fa9cc2aa9ccdd6eb0696acb24005b"}, + {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6"}, + {file = "greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80"}, + {file = "greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395"}, + {file = "greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39"}, + {file = "greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942"}, + {file = "greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01"}, + {file = "greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = 
"sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c"}, + {file = "greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47da355d8687fd65240c364c90a31569a133b7b60de111c255ef5b606f2ae291"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98884ecf2ffb7d7fe6bd517e8eb99d31ff7855a840fa6d0d63cd07c037f6a981"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1d4aeb8891338e60d1ab6127af1fe45def5259def8094b9c7e34690c8858803"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db32b5348615a04b82240cc67983cb315309e88d444a288934ee6ceaebcad6cc"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dcc62f31eae24de7f8dce72134c8651c58000d3b1868e01392baea7c32c247de"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1d3755bcb2e02de341c55b4fca7a745a24a9e7212ac953f6b3a48d117d7257aa"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b8da394b34370874b4572676f36acabac172602abf054cbc4ac910219f3340af"}, + {file = "greenlet-3.1.1-cp37-cp37m-win32.whl", hash = "sha256:a0dfc6c143b519113354e780a50381508139b07d2177cb6ad6a08278ec655798"}, + {file = "greenlet-3.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:54558ea205654b50c438029505def3834e80f0869a70fb15b871c29b4575ddef"}, + {file = "greenlet-3.1.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:346bed03fe47414091be4ad44786d1bd8bef0c3fcad6ed3dee074a032ab408a9"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfc59d69fc48664bc693842bd57acfdd490acafda1ab52c7836e3fc75c90a111"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21e10da6ec19b457b82636209cbe2331ff4306b54d06fa04b7c138ba18c8a81"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37b9de5a96111fc15418819ab4c4432e4f3c2ede61e660b1e33971eba26ef9ba"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6ef9ea3f137e5711f0dbe5f9263e8c009b7069d8a1acea822bd5e9dae0ae49c8"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85f3ff71e2e60bd4b4932a043fbbe0f499e263c628390b285cb599154a3b03b1"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95ffcf719966dd7c453f908e208e14cde192e09fde6c7186c8f1896ef778d8cd"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:03a088b9de532cbfe2ba2034b2b85e82df37874681e8c470d6fb2f8c04d7e4b7"}, + {file = "greenlet-3.1.1-cp38-cp38-win32.whl", hash = "sha256:8b8b36671f10ba80e159378df9c4f15c14098c4fd73a36b9ad715f057272fbef"}, + {file = "greenlet-3.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:7017b2be767b9d43cc31416aba48aab0d2309ee31b4dbf10a1d38fb7972bdf9d"}, + {file = "greenlet-3.1.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e"}, + {file = "greenlet-3.1.1-cp39-cp39-win32.whl", hash = "sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c"}, + {file = "greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22"}, + {file = "greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467"}, ] [package.extras] -docs = ["Sphinx"] +docs = ["Sphinx", "furo"] test = ["objgraph", "psutil"] [[package]] @@ -547,15 +591,18 @@ files = [ [[package]] name = "idna" -version = "3.4" +version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" files = [ - {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, - {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "imagesize" version = "1.4.1" @@ -569,27 +616,24 @@ files = [ [[package]] name = "isodate" -version = "0.6.1" +version = "0.7.2" 
description = "An ISO 8601 date/time/duration parser and formatter" optional = false -python-versions = "*" +python-versions = ">=3.7" files = [ - {file = "isodate-0.6.1-py2.py3-none-any.whl", hash = "sha256:0751eece944162659049d35f4f549ed815792b38793f07cf73381c1c87cbed96"}, - {file = "isodate-0.6.1.tar.gz", hash = "sha256:48c5881de7e8b0a0d648cb024c8062dc84e7b840ed81e864c7614fd3c127bde9"}, + {file = "isodate-0.7.2-py3-none-any.whl", hash = "sha256:28009937d8031054830160fce6d409ed342816b543597cece116d966c6d99e15"}, + {file = "isodate-0.7.2.tar.gz", hash = "sha256:4cd1aa0f43ca76f4a6c6c0292a85f40b35ec2e43e315b59f06e6d32171a953e6"}, ] -[package.dependencies] -six = "*" - [[package]] name = "jinja2" -version = "3.1.2" +version = "3.1.4" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" files = [ - {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, - {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, + {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, + {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, ] [package.dependencies] @@ -598,95 +642,131 @@ MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" +optional = false +python-versions = ">=3.8" +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + [[package]] name = "markupsafe" -version = "2.1.3" +version = "3.0.2" description = "Safely add untrusted strings to HTML/XML markup." 
optional = false +python-versions = ">=3.9" +files = [ + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = 
"MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = 
"MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false python-versions = ">=3.7" files = [ - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, - {file = 
"MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, - {file = 
"MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, - {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, ] [[package]] name = "packaging" -version = "23.2" +version = "24.1" description = "Core utilities for Python packages" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, - {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, ] [[package]] name = "pycparser" -version = "2.21" +version = "2.22" description = "C parser in Python" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.8" files = [ - {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, - {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, ] [[package]] @@ -705,61 +785,64 @@ windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pyyaml" -version = "6.0.1" +version = "6.0.2" description = "YAML parser and emitter for Python" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = 
"PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = 
"sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = 
"PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] [[package]] @@ -798,13 +881,13 @@ test = ["pre-commit", "pytest"] [[package]] name = "requests" -version = "2.31.0" +version = "2.32.3" description = "Python HTTP for Humans." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, ] [package.dependencies] @@ -817,6 +900,25 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] +[[package]] +name = "rich" +version = "13.9.3" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "rich-13.9.3-py3-none-any.whl", hash = "sha256:9836f5096eb2172c9e77df411c1b009bace4193d6a481d534fea75ebba758283"}, + {file = "rich-13.9.3.tar.gz", hash = "sha256:bc1e01b899537598cf02579d2b9f4a415104d3fc439313a7a2c165d76557a08e"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.11\""} + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + [[package]] name = "scales" version = "1.0.9" @@ -832,24 +934,35 @@ six = "*" [[package]] name = "setuptools" -version = "74.1.3" +version = "75.2.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-74.1.3-py3-none-any.whl", hash = "sha256:1cfd66bfcf197bce344da024c8f5b35acc4dcb7ca5202246a75296b4883f6851"}, - {file = "setuptools-74.1.3.tar.gz", hash = "sha256:fbb126f14b0b9ffa54c4574a50ae60673bbe8ae0b1645889d10b3b14f5891d28"}, + {file = "setuptools-75.2.0-py3-none-any.whl", hash = "sha256:a7fcb66f68b4d9e8e66b42f9876150a3371558f98fa32222ffaa5bced76406f8"}, + {file = 
"setuptools-75.2.0.tar.gz", hash = "sha256:753bb6ebf1f465a1912e19ed1d41f403a79173a9acf66a42e7e6aec45c3c16ec"}, ] [package.extras] check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"] -core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.text (>=3.7)", "more-itertools (>=8.8)", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] enabler = ["pytest-enabler (>=2.2)"] test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.11.*)", "pytest-mypy"] +[[package]] +name = "shellingham" +version = "1.5.4" +description = "Tool to Detect Surrounding Shell" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, + {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, +] + [[package]] name = "six" version = "1.16.0" @@ -885,37 +998,37 @@ files = [ [[package]] name = "soupsieve" -version = "2.5" +version = "2.6" description = "A modern CSS selector implementation for Beautiful Soup." 
optional = false python-versions = ">=3.8" files = [ - {file = "soupsieve-2.5-py3-none-any.whl", hash = "sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7"}, - {file = "soupsieve-2.5.tar.gz", hash = "sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690"}, + {file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"}, + {file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"}, ] [[package]] name = "sphinx" -version = "7.3.7" +version = "7.4.7" description = "Python documentation generator" optional = false python-versions = ">=3.9" files = [ - {file = "sphinx-7.3.7-py3-none-any.whl", hash = "sha256:413f75440be4cacf328f580b4274ada4565fb2187d696a84970c23f77b64d8c3"}, - {file = "sphinx-7.3.7.tar.gz", hash = "sha256:a4a7db75ed37531c05002d56ed6948d4c42f473a36f46e1382b0bd76ca9627bc"}, + {file = "sphinx-7.4.7-py3-none-any.whl", hash = "sha256:c2419e2135d11f1951cd994d6eb18a1835bd8fdd8429f9ca375dc1f3281bd239"}, + {file = "sphinx-7.4.7.tar.gz", hash = "sha256:242f92a7ea7e6c5b406fdc2615413890ba9f699114a9c09192d7dfead2ee9cfe"}, ] [package.dependencies] alabaster = ">=0.7.14,<0.8.0" -babel = ">=2.9" -colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} -docutils = ">=0.18.1,<0.22" +babel = ">=2.13" +colorama = {version = ">=0.4.6", markers = "sys_platform == \"win32\""} +docutils = ">=0.20,<0.22" imagesize = ">=1.3" -Jinja2 = ">=3.0" -packaging = ">=21.0" -Pygments = ">=2.14" -requests = ">=2.25.0" -snowballstemmer = ">=2.0" +Jinja2 = ">=3.1" +packaging = ">=23.0" +Pygments = ">=2.17" +requests = ">=2.30.0" +snowballstemmer = ">=2.2" sphinxcontrib-applehelp = "*" sphinxcontrib-devhelp = "*" sphinxcontrib-htmlhelp = ">=2.0.0" @@ -926,18 +1039,18 @@ tomli = {version = ">=2", markers = "python_version < \"3.11\""} [package.extras] docs = ["sphinxcontrib-websupport"] -lint = ["flake8 (>=3.5.0)", "importlib_metadata", "mypy (==1.9.0)", "pytest (>=6.0)", "ruff (==0.3.7)", "sphinx-lint", "tomli", "types-docutils", "types-requests"] -test = ["cython (>=3.0)", "defusedxml (>=0.7.1)", "pytest (>=6.0)", "setuptools (>=67.0)"] +lint = ["flake8 (>=6.0)", "importlib-metadata (>=6.0)", "mypy (==1.10.1)", "pytest (>=6.0)", "ruff (==0.5.2)", "sphinx-lint (>=0.9)", "tomli (>=2)", "types-docutils (==0.21.0.20240711)", "types-requests (>=2.30.0)"] +test = ["cython (>=3.0)", "defusedxml (>=0.7.1)", "pytest (>=8.0)", "setuptools (>=70.0)", "typing_extensions (>=4.9)"] [[package]] name = "sphinx-autobuild" -version = "2024.9.19" +version = "2024.10.3" description = "Rebuild Sphinx documentation on changes, with hot reloading in the browser." optional = false python-versions = ">=3.9" files = [ - {file = "sphinx_autobuild-2024.9.19-py3-none-any.whl", hash = "sha256:57d974eebfc6461ff0fd136e78bf7a9c057d543d5166d318a45599898019b82c"}, - {file = "sphinx_autobuild-2024.9.19.tar.gz", hash = "sha256:2dd4863d174e533c1cd075eb5dfc90ad9a21734af7efd25569bf228b405e08ef"}, + {file = "sphinx_autobuild-2024.10.3-py3-none-any.whl", hash = "sha256:158e16c36f9d633e613c9aaf81c19b0fc458ca78b112533b20dafcda430d60fa"}, + {file = "sphinx_autobuild-2024.10.3.tar.gz", hash = "sha256:248150f8f333e825107b6d4b86113ab28fa51750e5f9ae63b59dc339be951fb1"}, ] [package.dependencies] @@ -953,13 +1066,13 @@ test = ["httpx", "pytest (>=6)"] [[package]] name = "sphinx-collapse" -version = "0.1.2" +version = "0.1.3" description = "Collapse extension for Sphinx." 
optional = false python-versions = ">=3.7" files = [ - {file = "sphinx_collapse-0.1.2-py3-none-any.whl", hash = "sha256:7a2082da3c779916cc4c4d44832db3522a3a8bfbd12598ef01fb9eb523a164d0"}, - {file = "sphinx_collapse-0.1.2.tar.gz", hash = "sha256:a186000bf3fdac8ac0e8a99979f720ae790de15a5efc1435d4816f79a3d377c2"}, + {file = "sphinx_collapse-0.1.3-py3-none-any.whl", hash = "sha256:85fadb2ec8769b93fd04276538668fa96239ef60c20c4a9eaa3e480387a6e65b"}, + {file = "sphinx_collapse-0.1.3.tar.gz", hash = "sha256:cae141e6f03ecd52ed246a305a69e1b0d5d05e6cdf3fe803d40d583ad6ad895a"}, ] [package.dependencies] @@ -989,13 +1102,12 @@ rtd = ["ipython", "myst-nb", "sphinx", "sphinx-book-theme", "sphinx-examples"] [[package]] name = "sphinx-multiversion-scylla" -version = "0.3.1" +version = "0.3.2" description = "Add support for multiple versions to sphinx" optional = false python-versions = "*" files = [ - {file = "sphinx-multiversion-scylla-0.3.1.tar.gz", hash = "sha256:6c04f35ce76b60c4b54d72c52d299624ddc93f2930606bf76db33c214ca38380"}, - {file = "sphinx_multiversion_scylla-0.3.1-py3-none-any.whl", hash = "sha256:762cfb79f4ea2540653a5e8d30f8b604362cebaafb87934895dcc5a8bea6e255"}, + {file = "sphinx_multiversion_scylla-0.3.2.tar.gz", hash = "sha256:f415311273228f4f766c36256503da8e2ce01f9d13423f3fcee3160d6284852b"}, ] [package.dependencies] @@ -1021,19 +1133,19 @@ test = ["tox"] [[package]] name = "sphinx-scylladb-theme" -version = "1.8.1" +version = "1.8.3" description = "A Sphinx Theme for ScyllaDB documentation projects" optional = false python-versions = "<4.0,>=3.10" files = [ - {file = "sphinx_scylladb_theme-1.8.1-py3-none-any.whl", hash = "sha256:cddc3fd7f0509af8a5668a029abff7c8fea7442fd788036bbd010fe7db22e9f2"}, - {file = "sphinx_scylladb_theme-1.8.1.tar.gz", hash = "sha256:16872cba848fac491e3a3cc62fddd82daacf05c4e63a0c9defb1ec23041bb885"}, + {file = "sphinx_scylladb_theme-1.8.3-py3-none-any.whl", hash = "sha256:4671a4488c622136228ef42f7348d8dc6f364f2e999594a24d65cab2ba96d8ac"}, + {file = "sphinx_scylladb_theme-1.8.3.tar.gz", hash = "sha256:606478089653f6e21c245c609f40a5ba3bc478f2a867b078c476e1ac062378d3"}, ] [package.dependencies] beautifulsoup4 = ">=4.12.3,<5.0.0" pyyaml = ">=6.0.1,<7.0.0" -setuptools = ">=70.1.1,<75.0.0" +setuptools = ">=70.1.1,<76.0.0" sphinx-collapse = ">=0.1.1,<0.2.0" sphinx-copybutton = ">=0.5.2,<0.6.0" sphinx-notfound-page = ">=1.0.4,<2.0.0" @@ -1078,19 +1190,19 @@ prompt = ["sphinx-prompt (>=0.1)"] [[package]] name = "sphinx-tabs" -version = "3.4.5" +version = "3.4.7" description = "Tabbed views for Sphinx" optional = false -python-versions = "~=3.7" +python-versions = ">=3.7" files = [ - {file = "sphinx-tabs-3.4.5.tar.gz", hash = "sha256:ba9d0c1e3e37aaadd4b5678449eb08176770e0fc227e769b6ce747df3ceea531"}, - {file = "sphinx_tabs-3.4.5-py3-none-any.whl", hash = "sha256:92cc9473e2ecf1828ca3f6617d0efc0aa8acb06b08c56ba29d1413f2f0f6cf09"}, + {file = "sphinx-tabs-3.4.7.tar.gz", hash = "sha256:991ad4a424ff54119799ba1491701aa8130dd43509474aef45a81c42d889784d"}, + {file = "sphinx_tabs-3.4.7-py3-none-any.whl", hash = "sha256:c12d7a36fd413b369e9e9967a0a4015781b71a9c393575419834f19204bd1915"}, ] [package.dependencies] docutils = "*" pygments = "*" -sphinx = "*" +sphinx = ">=1.8" [package.extras] code-style = ["pre-commit (==2.13.0)"] @@ -1098,56 +1210,50 @@ testing = ["bs4", "coverage", "pygments", "pytest (>=7.1,<8)", "pytest-cov", "py [[package]] name = "sphinxcontrib-applehelp" -version = "1.0.7" +version = "2.0.0" description = "sphinxcontrib-applehelp is a Sphinx extension which outputs 
Apple help books" optional = false python-versions = ">=3.9" files = [ - {file = "sphinxcontrib_applehelp-1.0.7-py3-none-any.whl", hash = "sha256:094c4d56209d1734e7d252f6e0b3ccc090bd52ee56807a5d9315b19c122ab15d"}, - {file = "sphinxcontrib_applehelp-1.0.7.tar.gz", hash = "sha256:39fdc8d762d33b01a7d8f026a3b7d71563ea3b72787d5f00ad8465bd9d6dfbfa"}, + {file = "sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5"}, + {file = "sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1"}, ] -[package.dependencies] -Sphinx = ">=5" - [package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] test = ["pytest"] [[package]] name = "sphinxcontrib-devhelp" -version = "1.0.5" +version = "2.0.0" description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp documents" optional = false python-versions = ">=3.9" files = [ - {file = "sphinxcontrib_devhelp-1.0.5-py3-none-any.whl", hash = "sha256:fe8009aed765188f08fcaadbb3ea0d90ce8ae2d76710b7e29ea7d047177dae2f"}, - {file = "sphinxcontrib_devhelp-1.0.5.tar.gz", hash = "sha256:63b41e0d38207ca40ebbeabcf4d8e51f76c03e78cd61abe118cf4435c73d4212"}, + {file = "sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2"}, + {file = "sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad"}, ] -[package.dependencies] -Sphinx = ">=5" - [package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] test = ["pytest"] [[package]] name = "sphinxcontrib-htmlhelp" -version = "2.0.4" +version = "2.1.0" description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" optional = false python-versions = ">=3.9" files = [ - {file = "sphinxcontrib_htmlhelp-2.0.4-py3-none-any.whl", hash = "sha256:8001661c077a73c29beaf4a79968d0726103c5605e27db92b9ebed8bab1359e9"}, - {file = "sphinxcontrib_htmlhelp-2.0.4.tar.gz", hash = "sha256:6c26a118a05b76000738429b724a0568dbde5b72391a688577da08f11891092a"}, + {file = "sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8"}, + {file = "sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9"}, ] -[package.dependencies] -Sphinx = ">=5" - [package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] test = ["html5lib", "pytest"] [[package]] @@ -1166,49 +1272,45 @@ test = ["flake8", "mypy", "pytest"] [[package]] name = "sphinxcontrib-qthelp" -version = "1.0.6" +version = "2.0.0" description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp documents" optional = false python-versions = ">=3.9" files = [ - {file = "sphinxcontrib_qthelp-1.0.6-py3-none-any.whl", hash = "sha256:bf76886ee7470b934e363da7a954ea2825650013d367728588732c7350f49ea4"}, - {file = "sphinxcontrib_qthelp-1.0.6.tar.gz", hash = "sha256:62b9d1a186ab7f5ee3356d906f648cacb7a6bdb94d201ee7adf26db55092982d"}, + {file = "sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb"}, + {file = "sphinxcontrib_qthelp-2.0.0.tar.gz", 
hash = "sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab"}, ] -[package.dependencies] -Sphinx = ">=5" - [package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] -test = ["pytest"] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["defusedxml (>=0.7.1)", "pytest"] [[package]] name = "sphinxcontrib-serializinghtml" -version = "1.1.9" +version = "2.0.0" description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)" optional = false python-versions = ">=3.9" files = [ - {file = "sphinxcontrib_serializinghtml-1.1.9-py3-none-any.whl", hash = "sha256:9b36e503703ff04f20e9675771df105e58aa029cfcbc23b8ed716019b7416ae1"}, - {file = "sphinxcontrib_serializinghtml-1.1.9.tar.gz", hash = "sha256:0c64ff898339e1fac29abd2bf5f11078f3ec413cfe9c046d3120d7ca65530b54"}, + {file = "sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331"}, + {file = "sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d"}, ] -[package.dependencies] -Sphinx = ">=5" - [package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] test = ["pytest"] [[package]] name = "starlette" -version = "0.39.1" +version = "0.41.2" description = "The little ASGI library that shines." optional = false python-versions = ">=3.8" files = [ - {file = "starlette-0.39.1-py3-none-any.whl", hash = "sha256:0d31c90dacae588734e91b98cb4469fd37848ef23d2dd34355c5542bc827c02a"}, - {file = "starlette-0.39.1.tar.gz", hash = "sha256:33c5a94f64d3ab2c799b2715b45f254a3752f229d334f1562a3aaf78c23eab95"}, + {file = "starlette-0.41.2-py3-none-any.whl", hash = "sha256:fbc189474b4731cf30fcef52f18a8d070e3f3b46c6a04c97579e85e6ffca942d"}, + {file = "starlette-0.41.2.tar.gz", hash = "sha256:9834fd799d1a87fd346deb76158668cfa0b0d56f85caefe8268e2d97c3468b62"}, ] [package.dependencies] @@ -1219,89 +1321,83 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7 [[package]] name = "tomli" -version = "2.0.1" +version = "2.0.2" description = "A lil' TOML parser" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, + {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, + {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, ] [[package]] name = "tornado" -version = "5.1.1" +version = "4.5.3" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
optional = false -python-versions = ">= 2.7, !=3.0.*, !=3.1.*, !=3.2.*, != 3.3.*" +python-versions = "*" files = [ - {file = "tornado-5.1.1-cp35-cp35m-win32.whl", hash = "sha256:732e836008c708de2e89a31cb2fa6c0e5a70cb60492bee6f1ea1047500feaf7f"}, - {file = "tornado-5.1.1-cp35-cp35m-win_amd64.whl", hash = "sha256:0662d28b1ca9f67108c7e3b77afabfb9c7e87bde174fbda78186ecedc2499a9d"}, - {file = "tornado-5.1.1-cp36-cp36m-win32.whl", hash = "sha256:8154ec22c450df4e06b35f131adc4f2f3a12ec85981a203301d310abf580500f"}, - {file = "tornado-5.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:d4b3e5329f572f055b587efc57d29bd051589fb5a43ec8898c77a47ec2fa2bbb"}, - {file = "tornado-5.1.1-cp37-cp37m-win32.whl", hash = "sha256:e5f2585afccbff22390cddac29849df463b252b711aa2ce7c5f3f342a5b3b444"}, - {file = "tornado-5.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:8e9d728c4579682e837c92fdd98036bd5cdefa1da2aaf6acf26947e6dd0c01c5"}, - {file = "tornado-5.1.1.tar.gz", hash = "sha256:4e5158d97583502a7e2739951553cbd88a72076f152b4b11b64b9a10c4c49409"}, + {file = "tornado-4.5.3-cp35-cp35m-win32.whl", hash = "sha256:92b7ca81e18ba9ec3031a7ee73d4577ac21d41a0c9b775a9182f43301c3b5f8e"}, + {file = "tornado-4.5.3-cp35-cp35m-win_amd64.whl", hash = "sha256:b36298e9f63f18cad97378db2222c0e0ca6a55f6304e605515e05a25483ed51a"}, + {file = "tornado-4.5.3-cp36-cp36m-win32.whl", hash = "sha256:ab587996fe6fb9ce65abfda440f9b61e4f9f2cf921967723540679176915e4c3"}, + {file = "tornado-4.5.3-cp36-cp36m-win_amd64.whl", hash = "sha256:5ef073ac6180038ccf99411fe05ae9aafb675952a2c8db60592d5daf8401f803"}, + {file = "tornado-4.5.3.tar.gz", hash = "sha256:6d14e47eab0e15799cf3cdcc86b0b98279da68522caace2bd7ce644287685f0a"}, ] [[package]] name = "typer" -version = "0.9.0" +version = "0.12.5" description = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "typer-0.9.0-py3-none-any.whl", hash = "sha256:5d96d986a21493606a358cae4461bd8cdf83cbf33a5aa950ae629ca3b51467ee"}, - {file = "typer-0.9.0.tar.gz", hash = "sha256:50922fd79aea2f4751a8e0408ff10d2662bd0c8bbfa84755a699f3bada2978b2"}, + {file = "typer-0.12.5-py3-none-any.whl", hash = "sha256:62fe4e471711b147e3365034133904df3e235698399bc4de2b36c8579298d52b"}, + {file = "typer-0.12.5.tar.gz", hash = "sha256:f592f089bedcc8ec1b974125d64851029c3b1af145f04aca64d69410f0c9b722"}, ] [package.dependencies] -click = ">=7.1.1,<9.0.0" +click = ">=8.0.0" +rich = ">=10.11.0" +shellingham = ">=1.3.0" typing-extensions = ">=3.7.4.3" -[package.extras] -all = ["colorama (>=0.4.3,<0.5.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] -dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)", "pre-commit (>=2.17.0,<3.0.0)"] -doc = ["cairosvg (>=2.5.2,<3.0.0)", "mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pillow (>=9.3.0,<10.0.0)"] -test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.910)", "pytest (>=4.4.0,<8.0.0)", "pytest-cov (>=2.10.0,<5.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<4.0.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] - [[package]] name = "typing-extensions" -version = "4.8.0" +version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"}, - {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] [[package]] name = "urllib3" -version = "2.0.7" +version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "urllib3-2.0.7-py3-none-any.whl", hash = "sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e"}, - {file = "urllib3-2.0.7.tar.gz", hash = "sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84"}, + {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, + {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, ] [package.extras] brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] -secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] +h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] [[package]] name = "uvicorn" -version = "0.31.0" +version = "0.32.0" description = "The lightning-fast ASGI server." 
optional = false python-versions = ">=3.8" files = [ - {file = "uvicorn-0.31.0-py3-none-any.whl", hash = "sha256:cac7be4dd4d891c363cd942160a7b02e69150dcbc7a36be04d5f4af4b17c8ced"}, - {file = "uvicorn-0.31.0.tar.gz", hash = "sha256:13bc21373d103859f68fe739608e2eb054a816dea79189bc3ca08ea89a275906"}, + {file = "uvicorn-0.32.0-py3-none-any.whl", hash = "sha256:60b8f3a5ac027dcd31448f411ced12b5ef452c646f76f02f8cc3f25d8d26fd82"}, + {file = "uvicorn-0.32.0.tar.gz", hash = "sha256:f78b36b143c16f54ccdb8190d0a26b5f1901fe5a3c777e1ab29f26391af8551e"}, ] [package.dependencies] @@ -1522,58 +1618,59 @@ test = ["zope.testrunner"] [[package]] name = "zope-interface" -version = "6.1" +version = "7.1.1" description = "Interfaces for Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "zope.interface-6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:43b576c34ef0c1f5a4981163b551a8781896f2a37f71b8655fd20b5af0386abb"}, - {file = "zope.interface-6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:67be3ca75012c6e9b109860820a8b6c9a84bfb036fbd1076246b98e56951ca92"}, - {file = "zope.interface-6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b9bc671626281f6045ad61d93a60f52fd5e8209b1610972cf0ef1bbe6d808e3"}, - {file = "zope.interface-6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bbe81def9cf3e46f16ce01d9bfd8bea595e06505e51b7baf45115c77352675fd"}, - {file = "zope.interface-6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dc998f6de015723196a904045e5a2217f3590b62ea31990672e31fbc5370b41"}, - {file = "zope.interface-6.1-cp310-cp310-win_amd64.whl", hash = "sha256:239a4a08525c080ff833560171d23b249f7f4d17fcbf9316ef4159f44997616f"}, - {file = "zope.interface-6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9ffdaa5290422ac0f1688cb8adb1b94ca56cee3ad11f29f2ae301df8aecba7d1"}, - {file = "zope.interface-6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:34c15ca9248f2e095ef2e93af2d633358c5f048c49fbfddf5fdfc47d5e263736"}, - {file = "zope.interface-6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b012d023b4fb59183909b45d7f97fb493ef7a46d2838a5e716e3155081894605"}, - {file = "zope.interface-6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:97806e9ca3651588c1baaebb8d0c5ee3db95430b612db354c199b57378312ee8"}, - {file = "zope.interface-6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fddbab55a2473f1d3b8833ec6b7ac31e8211b0aa608df5ab09ce07f3727326de"}, - {file = "zope.interface-6.1-cp311-cp311-win_amd64.whl", hash = "sha256:a0da79117952a9a41253696ed3e8b560a425197d4e41634a23b1507efe3273f1"}, - {file = "zope.interface-6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8bb9c990ca9027b4214fa543fd4025818dc95f8b7abce79d61dc8a2112b561a"}, - {file = "zope.interface-6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b51b64432eed4c0744241e9ce5c70dcfecac866dff720e746d0a9c82f371dfa7"}, - {file = "zope.interface-6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa6fd016e9644406d0a61313e50348c706e911dca29736a3266fc9e28ec4ca6d"}, - {file = "zope.interface-6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:0c8cf55261e15590065039696607f6c9c1aeda700ceee40c70478552d323b3ff"}, - {file = "zope.interface-6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e30506bcb03de8983f78884807e4fd95d8db6e65b69257eea05d13d519b83ac0"}, - {file = "zope.interface-6.1-cp312-cp312-win_amd64.whl", hash = "sha256:e33e86fd65f369f10608b08729c8f1c92ec7e0e485964670b4d2633a4812d36b"}, - {file = "zope.interface-6.1-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:2f8d89721834524a813f37fa174bac074ec3d179858e4ad1b7efd4401f8ac45d"}, - {file = "zope.interface-6.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13b7d0f2a67eb83c385880489dbb80145e9d344427b4262c49fbf2581677c11c"}, - {file = "zope.interface-6.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef43ee91c193f827e49599e824385ec7c7f3cd152d74cb1dfe02cb135f264d83"}, - {file = "zope.interface-6.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e441e8b7d587af0414d25e8d05e27040d78581388eed4c54c30c0c91aad3a379"}, - {file = "zope.interface-6.1-cp37-cp37m-win_amd64.whl", hash = "sha256:f89b28772fc2562ed9ad871c865f5320ef761a7fcc188a935e21fe8b31a38ca9"}, - {file = "zope.interface-6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:70d2cef1bf529bff41559be2de9d44d47b002f65e17f43c73ddefc92f32bf00f"}, - {file = "zope.interface-6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ad54ed57bdfa3254d23ae04a4b1ce405954969c1b0550cc2d1d2990e8b439de1"}, - {file = "zope.interface-6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef467d86d3cfde8b39ea1b35090208b0447caaabd38405420830f7fd85fbdd56"}, - {file = "zope.interface-6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6af47f10cfc54c2ba2d825220f180cc1e2d4914d783d6fc0cd93d43d7bc1c78b"}, - {file = "zope.interface-6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9559138690e1bd4ea6cd0954d22d1e9251e8025ce9ede5d0af0ceae4a401e43"}, - {file = "zope.interface-6.1-cp38-cp38-win_amd64.whl", hash = "sha256:964a7af27379ff4357dad1256d9f215047e70e93009e532d36dcb8909036033d"}, - {file = "zope.interface-6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:387545206c56b0315fbadb0431d5129c797f92dc59e276b3ce82db07ac1c6179"}, - {file = "zope.interface-6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:57d0a8ce40ce440f96a2c77824ee94bf0d0925e6089df7366c2272ccefcb7941"}, - {file = "zope.interface-6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ebc4d34e7620c4f0da7bf162c81978fce0ea820e4fa1e8fc40ee763839805f3"}, - {file = "zope.interface-6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a804abc126b33824a44a7aa94f06cd211a18bbf31898ba04bd0924fbe9d282d"}, - {file = "zope.interface-6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f294a15f7723fc0d3b40701ca9b446133ec713eafc1cc6afa7b3d98666ee1ac"}, - {file = "zope.interface-6.1-cp39-cp39-win_amd64.whl", hash = "sha256:a41f87bb93b8048fe866fa9e3d0c51e27fe55149035dcf5f43da4b56732c0a40"}, - {file = "zope.interface-6.1.tar.gz", hash = "sha256:2fdc7ccbd6eb6b7df5353012fbed6c3c5d04ceaca0038f75e601060e95345309"}, + {file = "zope.interface-7.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:6650bd56ef350d37c8baccfd3ee8a0483ed6f8666e641e4b9ae1a1827b79f9e5"}, + {file = "zope.interface-7.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:84e87eba6b77a3af187bae82d8de1a7c208c2a04ec9f6bd444fd091b811ad92e"}, + {file = "zope.interface-7.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c4e1b4c06d9abd1037c088dae1566c85f344a3e6ae4350744c3f7f7259d9c67"}, + {file = "zope.interface-7.1.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7cd5e3d910ac87652a09f6e5db8e41bc3b49cf08ddd2d73d30afc644801492cd"}, + {file = "zope.interface-7.1.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca95594d936ee349620900be5b46c0122a1ff6ce42d7d5cb2cf09dc84071ef16"}, + {file = "zope.interface-7.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:ad339509dcfbbc99bf8e147db6686249c4032f26586699ec4c82f6e5909c9fe2"}, + {file = "zope.interface-7.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3e59f175e868f856a77c0a77ba001385c377df2104fdbda6b9f99456a01e102a"}, + {file = "zope.interface-7.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0de23bcb93401994ea00bc5c677ef06d420340ac0a4e9c10d80e047b9ce5af3f"}, + {file = "zope.interface-7.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cdb7e7e5524b76d3ec037c1d81a9e2c7457b240fd4cb0a2476b65c3a5a6c81f"}, + {file = "zope.interface-7.1.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3603ef82a9920bd0bfb505423cb7e937498ad971ad5a6141841e8f76d2fd5446"}, + {file = "zope.interface-7.1.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1d52d052355e0c5c89e0630dd2ff7c0b823fd5f56286a663e92444761b35e25"}, + {file = "zope.interface-7.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:179ad46ece518c9084cb272e4a69d266b659f7f8f48e51706746c2d8a426433e"}, + {file = "zope.interface-7.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e6503534b52bb1720ace9366ee30838a58a3413d3e197512f3338c8f34b5d89d"}, + {file = "zope.interface-7.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f85b290e5b8b11814efb0d004d8ce6c9a483c35c462e8d9bf84abb93e79fa770"}, + {file = "zope.interface-7.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d029fac6a80edae80f79c37e5e3abfa92968fe921886139b3ee470a1b177321a"}, + {file = "zope.interface-7.1.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5836b8fb044c6e75ba34dfaabc602493019eadfa0faf6ff25f4c4c356a71a853"}, + {file = "zope.interface-7.1.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7395f13533318f150ee72adb55b29284b16e73b6d5f02ab21f173b3e83f242b8"}, + {file = "zope.interface-7.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:1d0e23c6b746eb8ce04573cc47bcac60961ac138885d207bd6f57e27a1431ae8"}, + {file = "zope.interface-7.1.1-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:9fad9bd5502221ab179f13ea251cb30eef7cf65023156967f86673aff54b53a0"}, + {file = "zope.interface-7.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:55c373becbd36a44d0c9be1d5271422fdaa8562d158fb44b4192297b3c67096c"}, + {file = "zope.interface-7.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed1df8cc01dd1e3970666a7370b8bfc7457371c58ba88c57bd5bca17ab198053"}, + {file = 
"zope.interface-7.1.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99c14f0727c978639139e6cad7a60e82b7720922678d75aacb90cf4ef74a068c"}, + {file = "zope.interface-7.1.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b1eed7670d564f1025d7cda89f99f216c30210e42e95de466135be0b4a499d9"}, + {file = "zope.interface-7.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:3defc925c4b22ac1272d544a49c6ba04c3eefcce3200319ee1be03d9270306dd"}, + {file = "zope.interface-7.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8d0fe45be57b5219aa4b96e846631c04615d5ef068146de5a02ccd15c185321f"}, + {file = "zope.interface-7.1.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bcbeb44fc16e0078b3b68a95e43f821ae34dcbf976dde6985141838a5f23dd3d"}, + {file = "zope.interface-7.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8e7b05dc6315a193cceaec071cc3cf1c180cea28808ccded0b1283f1c38ba73"}, + {file = "zope.interface-7.1.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d553e02b68c0ea5a226855f02edbc9eefd99f6a8886fa9f9bdf999d77f46585"}, + {file = "zope.interface-7.1.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81744a7e61b598ebcf4722ac56a7a4f50502432b5b4dc7eb29075a89cf82d029"}, + {file = "zope.interface-7.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:7720322763aceb5e0a7cadcc38c67b839efe599f0887cbf6c003c55b1458c501"}, + {file = "zope.interface-7.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1a2ed0852c25950cf430067f058f8d98df6288502ac313861d9803fe7691a9b3"}, + {file = "zope.interface-7.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9595e478047ce752b35cfa221d7601a5283ccdaab40422e0dc1d4a334c70f580"}, + {file = "zope.interface-7.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2317e1d4dba68203a5227ea3057f9078ec9376275f9700086b8f0ffc0b358e1b"}, + {file = "zope.interface-7.1.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6821ef9870f32154da873fcde439274f99814ea452dd16b99fa0b66345c4b6b"}, + {file = "zope.interface-7.1.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:190eeec67e023d5aac54d183fa145db0b898664234234ac54643a441da434616"}, + {file = "zope.interface-7.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:d17e7fc814eaab93409b80819fd6d30342844345c27f3bc3c4b43c2425a8d267"}, + {file = "zope.interface-7.1.1.tar.gz", hash = "sha256:4284d664ef0ff7b709836d4de7b13d80873dc5faeffc073abdb280058bfac5e3"}, ] [package.dependencies] setuptools = "*" [package.extras] -docs = ["Sphinx", "repoze.sphinx.autointerface", "sphinx-rtd-theme"] -test = ["coverage (>=5.0.3)", "zope.event", "zope.testing"] -testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"] +docs = ["Sphinx", "furo", "repoze.sphinx.autointerface"] +test = ["coverage[toml]", "zope.event", "zope.testing"] +testing = ["coverage[toml]", "zope.event", "zope.testing"] [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "8f7b4cb1dfb489f9f4abdb06ca417d2d2947629c338eeed5d4cab8ce73aec0c0" +content-hash = "302d62881c3c0d5ae60560928810117c52594d173faf903ba5d3cfeb49554dd3" diff --git a/docs/pyproject.toml b/docs/pyproject.toml index 47a336674d..205b142c76 100644 --- a/docs/pyproject.toml +++ b/docs/pyproject.toml @@ -22,6 
+22,7 @@ sphinx-multiversion-scylla = "^0.3.1" Sphinx = "^7.3.7" scales = "^1.0.9" six = ">=1.9" +tornado = ">=4.0,<5.0" [build-system] requires = ["poetry>=1.8.0"]

From f934f22c7a887c17cad3f2871cea1a5e080aced9 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Tue, 29 Oct 2024 14:35:02 +0200 Subject: [PATCH 272/551] Add support for macos-15 wheel builds, which were introduced recently in GitHub Actions (https://github.com/github/roadmap/issues/986); someone was asking for those in #383. Fix: #383
--- .github/workflows/build-push.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index 8a7ce9937a..1e01932d29 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -40,6 +40,9 @@ jobs: - os: macos-latest platform: all + - os: macos-13 + platform: all + - os: macos-latest platform: PyPy @@ -103,7 +106,7 @@ jobs: - name: Overwrite for MacOs if: runner.os == 'MacOs' && matrix.platform == 'all' run: | - echo "CIBW_BUILD=cp37* cp38*" >> $GITHUB_ENV + echo "CIBW_BUILD=cp38* cp39* cp310* cp311* cp312*" >> $GITHUB_ENV echo "CIBW_BEFORE_TEST_MACOS=pip install -r {project}/test-requirements.txt pytest" >> $GITHUB_ENV - name: Overwrite for MacOs PyPy

From 23b342fdb766f5d2c0b49ced308768d0480da2a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Thu, 15 Aug 2024 18:37:32 +0200 Subject: [PATCH 273/551] Move run_integration_test.sh to the scripts folder

We have just three scripts (one soon to be gone because it is unnecessary), so there is no need to keep two folders for them.
--- .github/workflows/integration-tests.yml | 4 ++-- {ci => scripts}/run_integration_test.sh | 0 2 files changed, 2 insertions(+), 2 deletions(-) rename {ci => scripts}/run_integration_test.sh (100%)

diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index e2f2ece3d8..3de42ffe3e 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -33,10 +33,10 @@ jobs: run: | export EVENT_LOOP_MANAGER=${{ matrix.event_loop_manager }} export SCYLLA_VERSION='release:5.1' - ./ci/run_integration_test.sh tests/integration/standard/ tests/integration/cqlengine/ + ./scripts/run_integration_test.sh tests/integration/standard/ tests/integration/cqlengine/ - name: Test tablets run: | export EVENT_LOOP_MANAGER=${{ matrix.event_loop_manager }} export SCYLLA_VERSION='release:6.0.2' - ./ci/run_integration_test.sh tests/integration/experiments/ + ./scripts/run_integration_test.sh tests/integration/experiments/

diff --git a/ci/run_integration_test.sh b/scripts/run_integration_test.sh similarity index 100% rename from ci/run_integration_test.sh rename to scripts/run_integration_test.sh

From 98525b1836f766e69b7d5501f57bdb5b9e9cc653 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Karol=20Bary=C5=82a?= Date: Fri, 16 Aug 2024 18:15:22 +0200 Subject: [PATCH 274/551] Remove unnecessary files

Some of them are only used by upstream; some may have been used by us in the past. Whatever the reason, we don't need them anymore, so this commit removes them. This will introduce conflicts when merging upstream, but:
- Those conflicts will be easy to solve: just pick our version.
- When we move to a more granular way of pulling changes, we will get rid of redundant commits from history. We could even automatically drop commits that only touch those files.
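(Illustrative aside, not part of this patch: the "automatically drop commits" idea above could be sketched with plain git plumbing. Everything below is a hypothetical sketch; the script name is made up and the file list is only a sample of the removed files. Only standard git commands are used.)

#!/bin/bash
# list_droppable_commits.sh (hypothetical): print commits whose changed files
# all fall inside the "upstream-only" set, making them candidates for
# automatic dropping when pulling upstream changes.
UPSTREAM_ONLY="Jenkinsfile Jenkinsfile.bak appveyor.yml build.yaml.bak docs.yaml doxyfile tox.ini"

for sha in $(git rev-list HEAD); do
    # Files touched by this commit (empty for merge/root commits; skip those)
    files=$(git diff-tree --no-commit-id --name-only -r "$sha")
    [ -z "$files" ] && continue
    drop=true
    for f in $files; do
        case " $UPSTREAM_ONLY " in
            *" $f "*) ;;             # file is in the upstream-only set
            *) drop=false; break ;;  # commit also touches files we keep
        esac
    done
    $drop && echo "$sha"             # candidate commit to drop
done

Anything such a script prints could then be skipped when replaying upstream history, e.g. with git rebase --onto, before merging.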
--- .gitignore | 11 + Jenkinsfile | 688 ---------- Jenkinsfile.bak | 873 ------------ appveyor.yml | 26 - appveyor/appveyor.ps1 | 80 -- appveyor/run_test.ps1 | 49 - build.yaml.bak | 264 ---- ci/install_openssl.sh | 22 - docs.yaml | 75 - doxyfile | 2339 -------------------------------- test-datastax-requirements.txt | 3 - tox.ini | 51 - 12 files changed, 11 insertions(+), 4470 deletions(-) delete mode 100644 Jenkinsfile delete mode 100644 Jenkinsfile.bak delete mode 100644 appveyor.yml delete mode 100644 appveyor/appveyor.ps1 delete mode 100644 appveyor/run_test.ps1 delete mode 100644 build.yaml.bak delete mode 100755 ci/install_openssl.sh delete mode 100644 docs.yaml delete mode 100644 doxyfile delete mode 100644 test-datastax-requirements.txt delete mode 100644 tox.ini diff --git a/.gitignore b/.gitignore index 88e934235e..e0dbe9c859 100644 --- a/.gitignore +++ b/.gitignore @@ -43,3 +43,14 @@ tests/unit/cython/bytesio_testhelper.c #iPython *.ipynb +# Files from upstream that we don't need +Jenkinsfile +Jenkinsfile.bak +appveyor.yml +appveyor/appveyor.ps1 +appveyor/run_test.ps1 +build.yaml.bak +docs.yaml +doxyfile +tox.ini +test-datastax-requirements.txt \ No newline at end of file diff --git a/Jenkinsfile b/Jenkinsfile deleted file mode 100644 index 37b37ccb5e..0000000000 --- a/Jenkinsfile +++ /dev/null @@ -1,688 +0,0 @@ -#!groovy -/* - -There are multiple combinations to test the python driver. - -Test Profiles: - - Full: Execute all unit and integration tests, including long tests. - Standard: Execute unit and integration tests. - Smoke Tests: Execute a small subset of tests. - EVENT_LOOP: Execute a small subset of tests selected to test EVENT_LOOPs. - -Matrix Types: - - Full: All server versions, python runtimes tested with and without Cython. - Develop: Smaller matrix for dev purpose. - Cassandra: All cassandra server versions. - Dse: All dse server versions. - -Parameters: - - EVENT_LOOP: 'LIBEV' (Default), 'GEVENT', 'EVENTLET', 'ASYNCIO', 'ASYNCORE', 'TWISTED' - CYTHON: Default, 'True', 'False' - -*/ - -@Library('dsdrivers-pipeline-lib@develop') -import com.datastax.jenkins.drivers.python.Slack - -slack = new Slack() - -// Define our predefined matrices -// -// Smoke tests are CI-friendly test configuration. Currently-supported Python version + modern C*/DSE instances. -// We also avoid cython since it's tested as part of the nightlies. -matrices = [ - "FULL": [ - "SERVER": ['2.1', '2.2', '3.0', '3.11', '4.0', 'dse-5.0.15', 'dse-5.1.35', 'dse-6.0.18', 'dse-6.7.17', 'dse-6.8.30'], - "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], - "CYTHON": ["True", "False"] - ], - "DEVELOP": [ - "SERVER": ['2.1', '3.11', 'dse-6.8.30'], - "RUNTIME": ['2.7.18', '3.6.10'], - "CYTHON": ["True", "False"] - ], - "CASSANDRA": [ - "SERVER": ['2.1', '2.2', '3.0', '3.11', '4.0'], - "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], - "CYTHON": ["True", "False"] - ], - "DSE": [ - "SERVER": ['dse-5.0.15', 'dse-5.1.35', 'dse-6.0.18', 'dse-6.7.17', 'dse-6.8.30'], - "RUNTIME": ['2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], - "CYTHON": ["True", "False"] - ], - "SMOKE": [ - "SERVER": ['3.11', '4.0', 'dse-6.8.30'], - "RUNTIME": ['3.7.7', '3.8.3'], - "CYTHON": ["False"] - ] -] - -def initializeSlackContext() { - /* - Based on git branch/commit, configure the build context and env vars. 
- */ - - def driver_display_name = 'Cassandra Python Driver' - if (env.GIT_URL.contains('riptano/python-driver')) { - driver_display_name = 'private ' + driver_display_name - } else if (env.GIT_URL.contains('python-dse-driver')) { - driver_display_name = 'DSE Python Driver' - } - env.DRIVER_DISPLAY_NAME = driver_display_name - env.GIT_SHA = "${env.GIT_COMMIT.take(7)}" - env.GITHUB_PROJECT_URL = "https://${GIT_URL.replaceFirst(/(git@|http:\/\/|https:\/\/)/, '').replace(':', '/').replace('.git', '')}" - env.GITHUB_BRANCH_URL = "${env.GITHUB_PROJECT_URL}/tree/${env.BRANCH_NAME}" - env.GITHUB_COMMIT_URL = "${env.GITHUB_PROJECT_URL}/commit/${env.GIT_COMMIT}" -} - -def getBuildContext() { - /* - Based on schedule and parameters, configure the build context and env vars. - */ - - def profile = "${params.PROFILE}" - def EVENT_LOOP = "${params.EVENT_LOOP.toLowerCase()}" - matrixType = "SMOKE" - developBranchPattern = ~"((dev|long)-)?python-.*" - - if (developBranchPattern.matcher(env.BRANCH_NAME).matches()) { - matrixType = "DEVELOP" - if (env.BRANCH_NAME.contains("long")) { - profile = "FULL" - } - } - - // Check if parameters were set explicitly - if (params.MATRIX != "DEFAULT") { - matrixType = params.MATRIX - } - - matrix = matrices[matrixType].clone() - if (params.CYTHON != "DEFAULT") { - matrix["CYTHON"] = [params.CYTHON] - } - - if (params.SERVER_VERSION != "DEFAULT") { - matrix["SERVER"] = [params.SERVER_VERSION] - } - - if (params.PYTHON_VERSION != "DEFAULT") { - matrix["RUNTIME"] = [params.PYTHON_VERSION] - } - - if (params.CI_SCHEDULE == "WEEKNIGHTS") { - matrix["SERVER"] = params.CI_SCHEDULE_SERVER_VERSION.split(' ') - matrix["RUNTIME"] = params.CI_SCHEDULE_PYTHON_VERSION.split(' ') - } - - context = [ - vars: [ - "PROFILE=${profile}", - "EVENT_LOOP=${EVENT_LOOP}" - ], - matrix: matrix - ] - - return context -} - -def buildAndTest(context) { - initializeEnvironment() - installDriverAndCompileExtensions() - - try { - executeTests() - } finally { - junit testResults: '*_results.xml' - } -} - -def getMatrixBuilds(buildContext) { - def tasks = [:] - matrix = buildContext.matrix - - matrix["SERVER"].each { serverVersion -> - matrix["RUNTIME"].each { runtimeVersion -> - matrix["CYTHON"].each { cythonFlag -> - def taskVars = [ - "CASSANDRA_VERSION=${serverVersion}", - "PYTHON_VERSION=${runtimeVersion}", - "CYTHON_ENABLED=${cythonFlag}" - ] - def cythonDesc = cythonFlag == "True" ? 
", Cython": "" - tasks["${serverVersion}, py${runtimeVersion}${cythonDesc}"] = { - node("${OS_VERSION}") { - scm_variables = checkout scm - env.GIT_COMMIT = scm_variables.get('GIT_COMMIT') - env.GIT_URL = scm_variables.get('GIT_URL') - initializeSlackContext() - - if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { - slack.notifyChannel() - } - - withEnv(taskVars) { - buildAndTest(context) - } - } - } - } - } - } - return tasks -} - -def initializeEnvironment() { - sh label: 'Initialize the environment', script: '''#!/bin/bash -lex - pyenv global ${PYTHON_VERSION} - sudo apt-get install socat - pip install --upgrade pip - pip install -U setuptools - pip install ${HOME}/ccm - ''' - - // Determine if server version is Apache CassandraⓇ or DataStax Enterprise - if (env.CASSANDRA_VERSION.split('-')[0] == 'dse') { - sh label: 'Install DataStax Enterprise requirements', script: '''#!/bin/bash -lex - pip install -r test-datastax-requirements.txt - ''' - } else { - sh label: 'Install Apache CassandraⓇ requirements', script: '''#!/bin/bash -lex - pip install -r test-requirements.txt - ''' - - sh label: 'Uninstall the geomet dependency since it is not required for Cassandra', script: '''#!/bin/bash -lex - pip uninstall -y geomet - ''' - } - - sh label: 'Install unit test modules', script: '''#!/bin/bash -lex - pip install nose-ignore-docstring nose-exclude service_identity - ''' - - if (env.CYTHON_ENABLED == 'True') { - sh label: 'Install cython modules', script: '''#!/bin/bash -lex - pip install cython numpy - ''' - } - - sh label: 'Download Apache CassandraⓇ or DataStax Enterprise', script: '''#!/bin/bash -lex - . ${CCM_ENVIRONMENT_SHELL} ${CASSANDRA_VERSION} - ''' - - if (env.CASSANDRA_VERSION.split('-')[0] == 'dse') { - env.DSE_FIXED_VERSION = env.CASSANDRA_VERSION.split('-')[1] - sh label: 'Update environment for DataStax Enterprise', script: '''#!/bin/bash -le - cat >> ${HOME}/environment.txt << ENVIRONMENT_EOF -CCM_CASSANDRA_VERSION=${DSE_FIXED_VERSION} # maintain for backwards compatibility -CCM_VERSION=${DSE_FIXED_VERSION} -CCM_SERVER_TYPE=dse -DSE_VERSION=${DSE_FIXED_VERSION} -CCM_IS_DSE=true -CCM_BRANCH=${DSE_FIXED_VERSION} -DSE_BRANCH=${DSE_FIXED_VERSION} -ENVIRONMENT_EOF - ''' - } - - sh label: 'Display Python and environment information', script: '''#!/bin/bash -le - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - python --version - pip --version - pip freeze - printenv | sort - ''' -} - -def installDriverAndCompileExtensions() { - if (env.CYTHON_ENABLED == 'True') { - sh label: 'Install the driver and compile with C extensions with Cython', script: '''#!/bin/bash -lex - python setup.py build_ext --inplace - ''' - } else { - sh label: 'Install the driver and compile with C extensions without Cython', script: '''#!/bin/bash -lex - python setup.py build_ext --inplace --no-cython - ''' - } -} - -def executeStandardTests() { - - sh label: 'Execute unit tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . 
${HOME}/environment.txt - set +o allexport - - EVENT_LOOP=${EVENT_LOOP} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true - EVENT_LOOP=eventlet VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true - EVENT_LOOP=gevent VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true - ''' - - sh label: 'Execute Simulacron integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - SIMULACRON_JAR="${HOME}/simulacron.jar" - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true - - # Run backpressure tests separately to avoid memory issue - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_1_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_paused_connections || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_2_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_queued_requests_timeout || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_3_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_cluster_busy || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP=${EVENT_LOOP} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} 
MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_4_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_node_busy || true - ''' - - sh label: 'Execute CQL engine integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true - ''' - - sh label: 'Execute Apache CassandraⓇ integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true - ''' - - if (env.CASSANDRA_VERSION.split('-')[0] == 'dse' && env.CASSANDRA_VERSION.split('-')[1] != '4.8') { - sh label: 'Execute DataStax Enterprise integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true - ''' - } - - sh label: 'Execute DataStax Constellation integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP=${EVENT_LOOP} CLOUD_PROXY_PATH="${HOME}/proxy/" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true - ''' - - if (env.PROFILE == 'FULL') { - sh label: 'Execute long running integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . 
${HOME}/environment.txt - set +o allexport - - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true - ''' - } -} - -def executeDseSmokeTests() { - sh label: 'Execute profile DataStax Enterprise smoke test integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} DSE_VERSION=${DSE_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true - ''' -} - -def executeEventLoopTests() { - sh label: 'Execute profile event loop manager integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_TESTS=( - "tests/integration/standard/test_cluster.py" - "tests/integration/standard/test_concurrent.py" - "tests/integration/standard/test_connection.py" - "tests/integration/standard/test_control_connection.py" - "tests/integration/standard/test_metrics.py" - "tests/integration/standard/test_query.py" - "tests/integration/simulacron/test_endpoint.py" - "tests/integration/long/test_ssl.py" - ) - EVENT_LOOP=${EVENT_LOOP} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true - ''' -} - -def executeTests() { - switch(env.PROFILE) { - case 'DSE-SMOKE-TEST': - executeDseSmokeTests() - break - case 'EVENT_LOOP': - executeEventLoopTests() - break - default: - executeStandardTests() - break - } -} - - -// TODO move this in the shared lib -def getDriverMetricType() { - metric_type = 'oss' - if (env.GIT_URL.contains('riptano/python-driver')) { - metric_type = 'oss-private' - } else if (env.GIT_URL.contains('python-dse-driver')) { - metric_type = 'dse' - } - return metric_type -} - -def describeBuild(buildContext) { - script { - def runtimes = buildContext.matrix["RUNTIME"] - def serverVersions = buildContext.matrix["SERVER"] - def numBuilds = runtimes.size() * serverVersions.size() * buildContext.matrix["CYTHON"].size() - currentBuild.displayName = "${env.PROFILE} (${env.EVENT_LOOP} | ${numBuilds} builds)" - currentBuild.description = "${env.PROFILE} build testing servers (${serverVersions.join(', ')}) against Python (${runtimes.join(', ')}) using ${env.EVENT_LOOP} event loop manager" - } -} - -def scheduleTriggerJobName() { - "drivers/python/oss/master/disabled" -} - -pipeline { - agent none - - // Global pipeline timeout - options { - disableConcurrentBuilds() - timeout(time: 10, unit: 'HOURS') // TODO timeout should be per build - buildDiscarder(logRotator(artifactNumToKeepStr: '10', // Keep only the last 10 
artifacts - numToKeepStr: '50')) // Keep only the last 50 build records - } - - parameters { - choice( - name: 'ADHOC_BUILD_TYPE', - choices: ['BUILD', 'BUILD-AND-EXECUTE-TESTS'], - description: '''

Perform an adhoc build operation

Choice                  | Description
BUILD                   | Performs a Per-Commit build
BUILD-AND-EXECUTE-TESTS | Performs a build and executes the integration and unit tests
''') - choice( - name: 'PROFILE', - choices: ['STANDARD', 'FULL', 'DSE-SMOKE-TEST', 'EVENT_LOOP'], - description: '''

Profile to utilize for scheduled or adhoc builds

Choice         | Description
STANDARD       | Execute the standard tests for the driver
FULL           | Execute all tests for the driver, including long tests
DSE-SMOKE-TEST | Execute only the DataStax Enterprise smoke tests
EVENT_LOOP     | Execute only the event loop tests for the specified event loop manager (see: EVENT_LOOP)
''') - choice( - name: 'MATRIX', - choices: ['DEFAULT', 'SMOKE', 'FULL', 'DEVELOP', 'CASSANDRA', 'DSE'], - description: '''

The matrix for the build.

Choice    | Description
DEFAULT   | Default to the build context.
SMOKE     | Basic smoke tests for current Python runtimes + C*/DSE versions, no Cython
FULL      | All server versions, Python runtimes tested with and without Cython.
DEVELOP   | Smaller matrix for dev purposes.
CASSANDRA | All Cassandra server versions.
DSE       | All DSE server versions.
''') - choice( - name: 'PYTHON_VERSION', - choices: ['DEFAULT', '2.7.18', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], - description: 'Python runtime version. Default to the build context.') - choice( - name: 'SERVER_VERSION', - choices: ['DEFAULT', - '2.1', // Legacy Apache CassandraⓇ - '2.2', // Legacy Apache CassandraⓇ - '3.0', // Previous Apache CassandraⓇ - '3.11', // Current Apache CassandraⓇ - '4.0', // Development Apache CassandraⓇ - 'dse-5.0.15', // Long Term Support DataStax Enterprise - 'dse-5.1.35', // Legacy DataStax Enterprise - 'dse-6.0.18', // Previous DataStax Enterprise - 'dse-6.7.17', // Previous DataStax Enterprise - 'dse-6.8.30', // Current DataStax Enterprise - ], - description: '''Apache CassandraⓇ and DataStax Enterprise server version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
<table>
<tr><th>Choice</th><th>Description</th></tr>
<tr><td>DEFAULT</td><td>Default to the build context</td></tr>
<tr><td>2.1</td><td>Apache CassandraⓇ v2.1.x</td></tr>
<tr><td>2.2</td><td>Apache CassandraⓇ v2.2.x</td></tr>
<tr><td>3.0</td><td>Apache CassandraⓇ v3.0.x</td></tr>
<tr><td>3.11</td><td>Apache CassandraⓇ v3.11.x</td></tr>
<tr><td>4.0</td><td>Apache CassandraⓇ v4.x (CURRENTLY UNDER DEVELOPMENT)</td></tr>
<tr><td>dse-5.0.15</td><td>DataStax Enterprise v5.0.x (Long Term Support)</td></tr>
<tr><td>dse-5.1.35</td><td>DataStax Enterprise v5.1.x</td></tr>
<tr><td>dse-6.0.18</td><td>DataStax Enterprise v6.0.x</td></tr>
<tr><td>dse-6.7.17</td><td>DataStax Enterprise v6.7.x</td></tr>
<tr><td>dse-6.8.30</td><td>DataStax Enterprise v6.8.x (CURRENTLY UNDER DEVELOPMENT)</td></tr>
</table>
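During environment initialization the selected version is handed to the CCM environment script, mirroring the test-setup steps elsewhere in this file; a minimal sketch of that step (dse-6.8.30 chosen purely for illustration):

    . ${CCM_ENVIRONMENT_SHELL} dse-6.8.30   # illustrative version choice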
''') - choice( - name: 'CYTHON', - choices: ['DEFAULT', 'True', 'False'], - description: '''

Flag to determine if Cython should be enabled

<table>
<tr><th>Choice</th><th>Description</th></tr>
<tr><td>DEFAULT</td><td>Default to the build context</td></tr>
<tr><td>True</td><td>Enable Cython</td></tr>
<tr><td>False</td><td>Disable Cython</td></tr>
</table>
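This flag selects how the driver's C extensions are compiled before the tests run; the corresponding build step in this repository's CI scripts amounts to the following (a sketch of the two cases, not an additional step):

    python setup.py build_ext --inplace             # Cython enabled
    python setup.py build_ext --inplace --no-cython # Cython disabled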
''') - choice( - name: 'EVENT_LOOP', - choices: ['LIBEV', 'GEVENT', 'EVENTLET', 'ASYNCIO', 'ASYNCORE', 'TWISTED'], - description: '''

Event loop manager to utilize for scheduled or adhoc builds

<table>
<tr><th>Choice</th><th>Description</th></tr>
<tr><td>LIBEV</td><td>A full-featured, high-performance event loop that is loosely modeled after libevent, but without its limitations and bugs</td></tr>
<tr><td>GEVENT</td><td>A coroutine-based Python networking library that uses greenlet to provide a high-level synchronous API on top of the libev or libuv event loop</td></tr>
<tr><td>EVENTLET</td><td>A concurrent networking library for Python that allows you to change how you run your code, not how you write it</td></tr>
<tr><td>ASYNCIO</td><td>A library for writing concurrent code using the async/await syntax</td></tr>
<tr><td>ASYNCORE</td><td>A module that provides the basic infrastructure for writing asynchronous socket service clients and servers</td></tr>
<tr><td>TWISTED</td><td>An event-driven networking engine written in Python, licensed under the open source MIT license</td></tr>
</table>
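The selected manager is exported to the test scripts as the EVENT_LOOP environment variable (see the nosetests invocations above); an illustrative standalone run of the standard suite under gevent:

    EVENT_LOOP=gevent VERIFY_CYTHON=False nosetests -s -v --with-ignore-docstrings tests/integration/standard/   # illustrative; remaining env vars elided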
''') - choice( - name: 'CI_SCHEDULE', - choices: ['DO-NOT-CHANGE-THIS-SELECTION', 'WEEKNIGHTS', 'WEEKENDS'], - description: 'CI testing schedule to execute periodically scheduled builds and tests of the driver (DO NOT CHANGE THIS SELECTION)') - string( - name: 'CI_SCHEDULE_PYTHON_VERSION', - defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', - description: 'CI testing python version to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') - string( - name: 'CI_SCHEDULE_SERVER_VERSION', - defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', - description: 'CI testing server version to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') - } - - triggers { - parameterizedCron((scheduleTriggerJobName() == env.JOB_NAME) ? """ - # Every weeknight (Monday - Friday) around 4:00 AM - # These schedules will run with and without Cython enabled for Python v2.7.18 and v3.5.9 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18 3.5.9;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1.35 dse-6.0.18 dse-6.7.17 - """ : "") - } - - environment { - OS_VERSION = 'ubuntu/bionic64/python-driver' - CCM_ENVIRONMENT_SHELL = '/usr/local/bin/ccm_environment.sh' - CCM_MAX_HEAP_SIZE = '1536M' - } - - stages { - stage ('Build and Test') { - when { - beforeAgent true - allOf { - not { buildingTag() } - } - } - - steps { - script { - context = getBuildContext() - withEnv(context.vars) { - describeBuild(context) - - // build and test all builds - parallel getMatrixBuilds(context) - - slack.notifyChannel(currentBuild.currentResult) - } - } - } - } - - } -} diff --git a/Jenkinsfile.bak b/Jenkinsfile.bak deleted file mode 100644 index 87b20804ca..0000000000 --- a/Jenkinsfile.bak +++ /dev/null @@ -1,873 +0,0 @@ -#!groovy - -def initializeEnvironment() { - env.DRIVER_DISPLAY_NAME = 'Cassandra Python Driver' - env.DRIVER_METRIC_TYPE = 'oss' - if (env.GIT_URL.contains('riptano/python-driver')) { - env.DRIVER_DISPLAY_NAME = 'private ' + env.DRIVER_DISPLAY_NAME - env.DRIVER_METRIC_TYPE = 'oss-private' - } else if (env.GIT_URL.contains('python-dse-driver')) { - env.DRIVER_DISPLAY_NAME = 'DSE Python Driver' - env.DRIVER_METRIC_TYPE = 'dse' - } - - env.GIT_SHA = "${env.GIT_COMMIT.take(7)}" - env.GITHUB_PROJECT_URL = "https://${GIT_URL.replaceFirst(/(git@|http:\/\/|https:\/\/)/, '').replace(':', '/').replace('.git', '')}" - env.GITHUB_BRANCH_URL = "${GITHUB_PROJECT_URL}/tree/${env.BRANCH_NAME}" - env.GITHUB_COMMIT_URL = "${GITHUB_PROJECT_URL}/commit/${env.GIT_COMMIT}" - - sh label: 'Assign Python global environment', script: '''#!/bin/bash -lex - pyenv global ${PYTHON_VERSION} - ''' - - sh label: 'Install socat; required for unix socket tests', script: '''#!/bin/bash -lex - sudo apt-get install socat - ''' - - sh label: 'Install the latest setuptools', script: '''#!/bin/bash -lex - pip install --upgrade pip - pip install -U setuptools - ''' - - sh label: 'Install CCM', script: '''#!/bin/bash -lex - pip install ${HOME}/ccm - ''' - - // Determine if server version is Apache Cassandra� or DataStax Enterprise - if (env.CASSANDRA_VERSION.split('-')[0] == 'dse') { - sh label: 'Install DataStax Enterprise requirements', script: '''#!/bin/bash -lex - pip install -r test-datastax-requirements.txt - ''' - } else { - sh label: 'Install Apache CassandraⓇ requirements', script: '''#!/bin/bash -lex - pip install -r test-requirements.txt - ''' - - sh label: 'Uninstall the geomet dependency since it is not required for Cassandra', script: '''#!/bin/bash -lex - pip uninstall -y 
geomet - ''' - - } - - sh label: 'Install unit test modules', script: '''#!/bin/bash -lex - pip install nose-ignore-docstring nose-exclude service_identity - ''' - - if (env.CYTHON_ENABLED == 'True') { - sh label: 'Install cython modules', script: '''#!/bin/bash -lex - pip install cython numpy - ''' - } - - sh label: 'Download Apache CassandraⓇ or DataStax Enterprise', script: '''#!/bin/bash -lex - . ${CCM_ENVIRONMENT_SHELL} ${CASSANDRA_VERSION} - ''' - - sh label: 'Display Python and environment information', script: '''#!/bin/bash -le - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - python --version - pip --version - printenv | sort - ''' -} - -def installDriverAndCompileExtensions() { - if (env.CYTHON_ENABLED == 'True') { - sh label: 'Install the driver and compile with C extensions with Cython', script: '''#!/bin/bash -lex - python setup.py build_ext --inplace - ''' - } else { - sh label: 'Install the driver and compile with C extensions without Cython', script: '''#!/bin/bash -lex - python setup.py build_ext --inplace --no-cython - ''' - } -} - -def executeStandardTests() { - - sh label: 'Execute unit tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true - EVENT_LOOP_MANAGER=eventlet VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true - EVENT_LOOP_MANAGER=gevent VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true - ''' - - sh label: 'Execute Simulacron integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . 
${HOME}/environment.txt - set +o allexport - - SIMULACRON_JAR="${HOME}/simulacron.jar" - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true - - # Run backpressure tests separately to avoid memory issue - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_1_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_paused_connections || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_2_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_queued_requests_timeout || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_3_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_cluster_busy || true - SIMULACRON_JAR=${SIMULACRON_JAR} EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --exclude test_backpressure.py --xunit-file=simulacron_backpressure_4_results.xml tests/integration/simulacron/test_backpressure.py:TCPBackpressureTests.test_node_busy || true - ''' - - sh label: 'Execute CQL engine integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . 
${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true - ''' - - sh label: 'Execute Apache CassandraⓇ integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variables - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true - ''' - - if (env.CASSANDRA_VERSION.split('-')[0] == 'dse' && env.CASSANDRA_VERSION.split('-')[1] != '4.8') { - sh label: 'Execute DataStax Enterprise integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CASSANDRA_DIR=${CCM_INSTALL_DIR} DSE_VERSION=${DSE_VERSION} ADS_HOME="${HOME}/" VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true - ''' - } - - sh label: 'Execute DataStax Constellation integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CLOUD_PROXY_PATH="${HOME}/proxy/" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true - ''' - - if (env.EXECUTE_LONG_TESTS == 'True') { - sh label: 'Execute long running integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true - ''' - } -} - -def executeDseSmokeTests() { - sh label: 'Execute profile DataStax Enterprise smoke test integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . 
${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} DSE_VERSION=${DSE_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true - ''' -} - -def executeEventLoopTests() { - sh label: 'Execute profile event loop manager integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_TESTS=( - "tests/integration/standard/test_cluster.py" - "tests/integration/standard/test_concurrent.py" - "tests/integration/standard/test_connection.py" - "tests/integration/standard/test_control_connection.py" - "tests/integration/standard/test_metrics.py" - "tests/integration/standard/test_query.py" - "tests/integration/simulacron/test_endpoint.py" - "tests/integration/long/test_ssl.py" - ) - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} CCM_ARGS="${CCM_ARGS}" DSE_VERSION=${DSE_VERSION} CASSANDRA_VERSION=${CCM_CASSANDRA_VERSION} MAPPED_CASSANDRA_VERSION=${MAPPED_CASSANDRA_VERSION} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true - ''' -} - -def executeUpgradeTests() { - sh label: 'Execute profile upgrade integration tests', script: '''#!/bin/bash -lex - # Load CCM environment variable - set -o allexport - . ${HOME}/environment.txt - set +o allexport - - EVENT_LOOP_MANAGER=${EVENT_LOOP_MANAGER} VERIFY_CYTHON=${CYTHON_ENABLED} nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=upgrade_results.xml tests/integration/upgrade || true - ''' -} - -def executeTests() { - switch(params.PROFILE) { - case 'DSE-SMOKE-TEST': - executeDseSmokeTests() - break - case 'EVENT-LOOP': - executeEventLoopTests() - break - case 'UPGRADE': - executeUpgradeTests() - break - default: - executeStandardTests() - break - } -} - -def notifySlack(status = 'started') { - // Set the global pipeline scoped environment (this is above each matrix) - env.BUILD_STATED_SLACK_NOTIFIED = 'true' - - def buildType = 'Commit' - if (params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') { - buildType = "${params.CI_SCHEDULE.toLowerCase().capitalize()}" - } - - def color = 'good' // Green - if (status.equalsIgnoreCase('aborted')) { - color = '808080' // Grey - } else if (status.equalsIgnoreCase('unstable')) { - color = 'warning' // Orange - } else if (status.equalsIgnoreCase('failed')) { - color = 'danger' // Red - } - - def message = """Build ${status} for ${env.DRIVER_DISPLAY_NAME} [${buildType}] -<${env.GITHUB_BRANCH_URL}|${env.BRANCH_NAME}> - <${env.RUN_DISPLAY_URL}|#${env.BUILD_NUMBER}> - <${env.GITHUB_COMMIT_URL}|${env.GIT_SHA}>""" - if (params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION') { - message += " - ${params.CI_SCHEDULE_PYTHON_VERSION} - ${params.EVENT_LOOP_MANAGER}" - } - if (!status.equalsIgnoreCase('Started')) { - message += """ -${status} after ${currentBuild.durationString - ' and counting'}""" - } - - slackSend color: "${color}", - channel: "#python-driver-dev-bots", - message: "${message}" -} - -def submitCIMetrics(buildType) { - long durationMs = 
currentBuild.duration - long durationSec = durationMs / 1000 - long nowSec = (currentBuild.startTimeInMillis + durationMs) / 1000 - def branchNameNoPeriods = env.BRANCH_NAME.replaceAll('\\.', '_') - def durationMetric = "okr.ci.python.${env.DRIVER_METRIC_TYPE}.${buildType}.${branchNameNoPeriods} ${durationSec} ${nowSec}" - - timeout(time: 1, unit: 'MINUTES') { - withCredentials([string(credentialsId: 'lab-grafana-address', variable: 'LAB_GRAFANA_ADDRESS'), - string(credentialsId: 'lab-grafana-port', variable: 'LAB_GRAFANA_PORT')]) { - withEnv(["DURATION_METRIC=${durationMetric}"]) { - sh label: 'Send runtime metrics to labgrafana', script: '''#!/bin/bash -lex - echo "${DURATION_METRIC}" | nc -q 5 ${LAB_GRAFANA_ADDRESS} ${LAB_GRAFANA_PORT} - ''' - } - } - } -} - -def describePerCommitStage() { - script { - def type = 'standard' - def serverDescription = 'current Apache CassandaraⓇ and supported DataStax Enterprise versions' - if (env.BRANCH_NAME ==~ /long-python.*/) { - type = 'long' - } else if (env.BRANCH_NAME ==~ /dev-python.*/) { - type = 'dev' - } - - currentBuild.displayName = "Per-Commit (${env.EVENT_LOOP_MANAGER} | ${type.capitalize()})" - currentBuild.description = "Per-Commit build and ${type} testing of ${serverDescription} against Python v2.7.18 and v3.5.9 using ${env.EVENT_LOOP_MANAGER} event loop manager" - } - - sh label: 'Describe the python environment', script: '''#!/bin/bash -lex - python -V - pip freeze - ''' -} - -def describeScheduledTestingStage() { - script { - def type = params.CI_SCHEDULE.toLowerCase().capitalize() - def displayName = "${type} schedule (${env.EVENT_LOOP_MANAGER}" - if (env.CYTHON_ENABLED == 'True') { - displayName += " | Cython" - } - if (params.PROFILE != 'NONE') { - displayName += " | ${params.PROFILE}" - } - displayName += ")" - currentBuild.displayName = displayName - - def serverVersionDescription = "${params.CI_SCHEDULE_SERVER_VERSION.replaceAll(' ', ', ')} server version(s) in the matrix" - def pythonVersionDescription = "${params.CI_SCHEDULE_PYTHON_VERSION.replaceAll(' ', ', ')} Python version(s) in the matrix" - def description = "${type} scheduled testing using ${env.EVENT_LOOP_MANAGER} event loop manager" - if (env.CYTHON_ENABLED == 'True') { - description += ", with Cython enabled" - } - if (params.PROFILE != 'NONE') { - description += ", ${params.PROFILE} profile" - } - description += ", ${serverVersionDescription}, and ${pythonVersionDescription}" - currentBuild.description = description - } -} - -def describeAdhocTestingStage() { - script { - def serverType = params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION.split('-')[0] - def serverDisplayName = 'Apache CassandaraⓇ' - def serverVersion = " v${serverType}" - if (serverType == 'ALL') { - serverDisplayName = "all ${serverDisplayName} and DataStax Enterprise server versions" - serverVersion = '' - } else { - try { - serverVersion = " v${env.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION.split('-')[1]}" - } catch (e) { - ;; // no-op - } - if (serverType == 'dse') { - serverDisplayName = 'DataStax Enterprise' - } - } - def displayName = "${params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION} for v${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION} (${env.EVENT_LOOP_MANAGER}" - if (env.CYTHON_ENABLED == 'True') { - displayName += " | Cython" - } - if (params.PROFILE != 'NONE') { - displayName += " | ${params.PROFILE}" - } - displayName += ")" - currentBuild.displayName = displayName - - def description = "Testing ${serverDisplayName} ${serverVersion} using ${env.EVENT_LOOP_MANAGER} 
against Python ${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION}" - if (env.CYTHON_ENABLED == 'True') { - description += ", with Cython" - } - if (params.PROFILE == 'NONE') { - if (params.EXECUTE_LONG_TESTS) { - description += ", with" - } else { - description += ", without" - } - description += " long tests executed" - } else { - description += ", ${params.PROFILE} profile" - } - currentBuild.description = description - } -} - -def branchPatternCron = ~"(master)" -def riptanoPatternCron = ~"(riptano)" - -pipeline { - agent none - - // Global pipeline timeout - options { - timeout(time: 10, unit: 'HOURS') - buildDiscarder(logRotator(artifactNumToKeepStr: '10', // Keep only the last 10 artifacts - numToKeepStr: '50')) // Keep only the last 50 build records - } - - parameters { - choice( - name: 'ADHOC_BUILD_TYPE', - choices: ['BUILD', 'BUILD-AND-EXECUTE-TESTS'], - description: '''

Perform an adhoc build operation

<table>
<tr><th>Choice</th><th>Description</th></tr>
<tr><td>BUILD</td><td>Performs a Per-Commit build</td></tr>
<tr><td>BUILD-AND-EXECUTE-TESTS</td><td>Performs a build and executes the integration and unit tests</td></tr>
</table>
''') - choice( - name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION', - choices: ['2.7.18', '3.4.10', '3.5.9', '3.6.10', '3.7.7', '3.8.3'], - description: 'Python version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY!') - choice( - name: 'ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION', - choices: ['2.1', // Legacy Apache CassandraⓇ - '2.2', // Legacy Apache CassandraⓇ - '3.0', // Previous Apache CassandraⓇ - '3.11', // Current Apache CassandraⓇ - '4.0', // Development Apache CassandraⓇ - 'dse-5.0', // Long Term Support DataStax Enterprise - 'dse-5.1', // Legacy DataStax Enterprise - 'dse-6.0', // Previous DataStax Enterprise - 'dse-6.7', // Previous DataStax Enterprise - 'dse-6.8', // Current DataStax Enterprise - 'ALL'], - description: '''Apache CassandraⓇ and DataStax Enterprise server version to use for adhoc BUILD-AND-EXECUTE-TESTS ONLY! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
<table>
<tr><th>Choice</th><th>Description</th></tr>
<tr><td>2.1</td><td>Apache CassandraⓇ v2.1.x</td></tr>
<tr><td>2.2</td><td>Apache CassandraⓇ v2.2.x</td></tr>
<tr><td>3.0</td><td>Apache CassandraⓇ v3.0.x</td></tr>
<tr><td>3.11</td><td>Apache CassandraⓇ v3.11.x</td></tr>
<tr><td>4.0</td><td>Apache CassandraⓇ v4.x (CURRENTLY UNDER DEVELOPMENT)</td></tr>
<tr><td>dse-5.0</td><td>DataStax Enterprise v5.0.x (Long Term Support)</td></tr>
<tr><td>dse-5.1</td><td>DataStax Enterprise v5.1.x</td></tr>
<tr><td>dse-6.0</td><td>DataStax Enterprise v6.0.x</td></tr>
<tr><td>dse-6.7</td><td>DataStax Enterprise v6.7.x</td></tr>
<tr><td>dse-6.8</td><td>DataStax Enterprise v6.8.x (CURRENTLY UNDER DEVELOPMENT)</td></tr>
</table>
''') - booleanParam( - name: 'CYTHON', - defaultValue: false, - description: 'Flag to determine if Cython should be enabled for scheduled or adhoc builds') - booleanParam( - name: 'EXECUTE_LONG_TESTS', - defaultValue: false, - description: 'Flag to determine if long integration tests should be executed for scheduled or adhoc builds') - choice( - name: 'EVENT_LOOP_MANAGER', - choices: ['LIBEV', 'GEVENT', 'EVENTLET', 'ASYNCIO', 'ASYNCORE', 'TWISTED'], - description: '''

Event loop manager to utilize for scheduled or adhoc builds

<table>
<tr><th>Choice</th><th>Description</th></tr>
<tr><td>LIBEV</td><td>A full-featured, high-performance event loop that is loosely modeled after libevent, but without its limitations and bugs</td></tr>
<tr><td>GEVENT</td><td>A coroutine-based Python networking library that uses greenlet to provide a high-level synchronous API on top of the libev or libuv event loop</td></tr>
<tr><td>EVENTLET</td><td>A concurrent networking library for Python that allows you to change how you run your code, not how you write it</td></tr>
<tr><td>ASYNCIO</td><td>A library for writing concurrent code using the async/await syntax</td></tr>
<tr><td>ASYNCORE</td><td>A module that provides the basic infrastructure for writing asynchronous socket service clients and servers</td></tr>
<tr><td>TWISTED</td><td>An event-driven networking engine written in Python, licensed under the open source MIT license</td></tr>
</table>
''') - choice( - name: 'PROFILE', - choices: ['NONE', 'DSE-SMOKE-TEST', 'EVENT-LOOP', 'UPGRADE'], - description: '''

Profile to utilize for scheduled or adhoc builds

<table>
<tr><th>Choice</th><th>Description</th></tr>
<tr><td>NONE</td><td>Execute the standard tests for the driver</td></tr>
<tr><td>DSE-SMOKE-TEST</td><td>Execute only the DataStax Enterprise smoke tests</td></tr>
<tr><td>EVENT-LOOP</td><td>Execute only the event loop tests for the specified event loop manager (see: EVENT_LOOP_MANAGER)</td></tr>
<tr><td>UPGRADE</td><td>Execute only the upgrade tests</td></tr>
</table>
''') - choice( - name: 'CI_SCHEDULE', - choices: ['DO-NOT-CHANGE-THIS-SELECTION', 'WEEKNIGHTS', 'WEEKENDS'], - description: 'CI testing schedule to execute periodically scheduled builds and tests of the driver (DO NOT CHANGE THIS SELECTION)') - string( - name: 'CI_SCHEDULE_PYTHON_VERSION', - defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', - description: 'CI testing python version to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') - string( - name: 'CI_SCHEDULE_SERVER_VERSION', - defaultValue: 'DO-NOT-CHANGE-THIS-SELECTION', - description: 'CI testing server version to utilize for scheduled test runs of the driver (DO NOT CHANGE THIS SELECTION)') - } - - triggers { - parameterizedCron((branchPatternCron.matcher(env.BRANCH_NAME).matches() && !riptanoPatternCron.matcher(GIT_URL).find()) ? """ - # Every weeknight (Monday - Friday) around 4:00 AM - # These schedules will run with and without Cython enabled for Python v2.7.18 and v3.5.9 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 1-5 %CI_SCHEDULE=WEEKNIGHTS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.2 3.11 dse-5.1 dse-6.0 dse-6.7 - - # Every Saturday around 12:00, 4:00 and 8:00 PM - # These schedules are for weekly libev event manager runs with and without Cython for most of the Python versions (excludes v3.5.9.x) - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.4.10;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - H 12 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=LIBEV;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 3.0 dse-5.1 dse-6.0 dse-6.7 - # These schedules are for weekly gevent event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 16 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=GEVENT;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - # These schedules are for weekly eventlet event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) - H 20 * * 6 
%CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 20 * * 6 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=EVENTLET;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - - # Every Sunday around 12:00 and 4:00 AM - # These schedules are for weekly asyncore event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 0 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=ASYNCORE;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - # These schedules are for weekly twisted event manager event loop only runs with and without Cython for most of the Python versions (excludes v3.4.10.x) - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=2.7.18;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.5.9;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.6.10;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.7.7;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - H 4 * * 7 %CI_SCHEDULE=WEEKENDS;EVENT_LOOP_MANAGER=TWISTED;PROFILE=EVENT-LOOP;CI_SCHEDULE_PYTHON_VERSION=3.8.3;CI_SCHEDULE_SERVER_VERSION=2.1 2.2 3.0 3.11 dse-5.1 dse-6.0 dse-6.7 - """ : "") - } - - environment { - OS_VERSION = 'ubuntu/bionic64/python-driver' - CYTHON_ENABLED = "${params.CYTHON ? 'True' : 'False'}" - EVENT_LOOP_MANAGER = "${params.EVENT_LOOP_MANAGER.toLowerCase()}" - EXECUTE_LONG_TESTS = "${params.EXECUTE_LONG_TESTS ? 
'True' : 'False'}" - CCM_ENVIRONMENT_SHELL = '/usr/local/bin/ccm_environment.sh' - CCM_MAX_HEAP_SIZE = '1536M' - } - - stages { - stage ('Per-Commit') { - options { - timeout(time: 2, unit: 'HOURS') - } - when { - beforeAgent true - branch pattern: '((dev|long)-)?python-.*', comparator: 'REGEXP' - allOf { - expression { params.ADHOC_BUILD_TYPE == 'BUILD' } - expression { params.CI_SCHEDULE == 'DO-NOT-CHANGE-THIS-SELECTION' } - not { buildingTag() } - } - } - - matrix { - axes { - axis { - name 'CASSANDRA_VERSION' - values '3.11', // Current Apache Cassandra - 'dse-6.8' // Current DataStax Enterprise - } - axis { - name 'PYTHON_VERSION' - values '2.7.18', '3.5.9' - } - axis { - name 'CYTHON_ENABLED' - values 'False' - } - } - - agent { - label "${OS_VERSION}" - } - - stages { - stage('Initialize-Environment') { - steps { - initializeEnvironment() - script { - if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { - notifySlack() - } - } - } - } - stage('Describe-Build') { - steps { - describePerCommitStage() - } - } - stage('Install-Driver-And-Compile-Extensions') { - steps { - installDriverAndCompileExtensions() - } - } - stage('Execute-Tests') { - steps { - - script { - if (env.BRANCH_NAME ==~ /long-python.*/) { - withEnv(["EXECUTE_LONG_TESTS=True"]) { - executeTests() - } - } - else { - executeTests() - } - } - } - post { - always { - junit testResults: '*_results.xml' - } - } - } - } - } - post { - always { - node('master') { - submitCIMetrics('commit') - } - } - aborted { - notifySlack('aborted') - } - success { - notifySlack('completed') - } - unstable { - notifySlack('unstable') - } - failure { - notifySlack('FAILED') - } - } - } - - stage ('Scheduled-Testing') { - when { - beforeAgent true - allOf { - expression { params.ADHOC_BUILD_TYPE == 'BUILD' } - expression { params.CI_SCHEDULE != 'DO-NOT-CHANGE-THIS-SELECTION' } - not { buildingTag() } - } - } - matrix { - axes { - axis { - name 'CASSANDRA_VERSION' - values '2.1', // Legacy Apache Cassandra - '2.2', // Legacy Apache Cassandra - '3.0', // Previous Apache Cassandra - '3.11', // Current Apache Cassandra - 'dse-5.1', // Legacy DataStax Enterprise - 'dse-6.0', // Previous DataStax Enterprise - 'dse-6.7' // Current DataStax Enterprise - } - axis { - name 'CYTHON_ENABLED' - values 'True', 'False' - } - } - when { - beforeAgent true - allOf { - expression { return params.CI_SCHEDULE_SERVER_VERSION.split(' ').any { it =~ /(ALL|${env.CASSANDRA_VERSION})/ } } - } - } - - environment { - PYTHON_VERSION = "${params.CI_SCHEDULE_PYTHON_VERSION}" - } - agent { - label "${OS_VERSION}" - } - - stages { - stage('Initialize-Environment') { - steps { - initializeEnvironment() - script { - if (env.BUILD_STATED_SLACK_NOTIFIED != 'true') { - notifySlack() - } - } - } - } - stage('Describe-Build') { - steps { - describeScheduledTestingStage() - } - } - stage('Install-Driver-And-Compile-Extensions') { - steps { - installDriverAndCompileExtensions() - } - } - stage('Execute-Tests') { - steps { - executeTests() - } - post { - always { - junit testResults: '*_results.xml' - } - } - } - } - } - post { - aborted { - notifySlack('aborted') - } - success { - notifySlack('completed') - } - unstable { - notifySlack('unstable') - } - failure { - notifySlack('FAILED') - } - } - } - - - stage('Adhoc-Testing') { - when { - beforeAgent true - allOf { - expression { params.ADHOC_BUILD_TYPE == 'BUILD-AND-EXECUTE-TESTS' } - not { buildingTag() } - } - } - - environment { - CYTHON_ENABLED = "${params.CYTHON ? 
'True' : 'False'}" - PYTHON_VERSION = "${params.ADHOC_BUILD_AND_EXECUTE_TESTS_PYTHON_VERSION}" - } - - matrix { - axes { - axis { - name 'CASSANDRA_VERSION' - values '2.1', // Legacy Apache Cassandra - '2.2', // Legacy Apache Cassandra - '3.0', // Previous Apache Cassandra - '3.11', // Current Apache Cassandra - '4.0', // Development Apache Cassandra - 'dse-5.0', // Long Term Support DataStax Enterprise - 'dse-5.1', // Legacy DataStax Enterprise - 'dse-6.0', // Previous DataStax Enterprise - 'dse-6.7', // Current DataStax Enterprise - 'dse-6.8' // Development DataStax Enterprise - } - } - when { - beforeAgent true - allOf { - expression { params.ADHOC_BUILD_AND_EXECUTE_TESTS_SERVER_VERSION ==~ /(ALL|${env.CASSANDRA_VERSION})/ } - } - } - - agent { - label "${OS_VERSION}" - } - - stages { - stage('Describe-Build') { - steps { - describeAdhocTestingStage() - } - } - stage('Initialize-Environment') { - steps { - initializeEnvironment() - } - } - stage('Install-Driver-And-Compile-Extensions') { - steps { - installDriverAndCompileExtensions() - } - } - stage('Execute-Tests') { - steps { - executeTests() - } - post { - always { - junit testResults: '*_results.xml' - } - } - } - } - } - } - } -} diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index d1daaa6ec6..0000000000 --- a/appveyor.yml +++ /dev/null @@ -1,26 +0,0 @@ -environment: - matrix: - - PYTHON: "C:\\Python27-x64" - cassandra_version: 3.11.2 - ci_type: standard - - PYTHON: "C:\\Python35-x64" - cassandra_version: 3.11.2 - ci_type: standard -os: Visual Studio 2015 -platform: - - x64 -install: - - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%" - - ps: .\appveyor\appveyor.ps1 -build_script: - - cmd: | - "%VS140COMNTOOLS%\..\..\VC\vcvarsall.bat" x86_amd64 - python setup.py install --no-cython -test_script: - - ps: .\appveyor\run_test.ps1 -cache: - - C:\Users\appveyor\.m2 - - C:\ProgramData\chocolatey\bin - - C:\ProgramData\chocolatey\lib - - C:\Users\appveyor\jce_policy-1.7.0.zip - - C:\Users\appveyor\jce_policy-1.8.0.zip \ No newline at end of file diff --git a/appveyor/appveyor.ps1 b/appveyor/appveyor.ps1 deleted file mode 100644 index 5f6840e4e1..0000000000 --- a/appveyor/appveyor.ps1 +++ /dev/null @@ -1,80 +0,0 @@ -$env:JAVA_HOME="C:\Program Files\Java\jdk1.8.0" -$env:PATH="$($env:JAVA_HOME)\bin;$($env:PATH)" -$env:CCM_PATH="C:\Users\appveyor\ccm" -$env:CASSANDRA_VERSION=$env:cassandra_version -$env:EVENT_LOOP_MANAGER="asyncore" -$env:SIMULACRON_JAR="C:\Users\appveyor\simulacron-standalone-0.7.0.jar" - -python --version -python -c "import platform; print(platform.architecture())" -# Install Ant -Start-Process cinst -ArgumentList @("-y","ant") -Wait -NoNewWindow -# Workaround for ccm, link ant.exe -> ant.bat -If (!(Test-Path C:\ProgramData\chocolatey\bin\ant.bat)) { - cmd /c mklink C:\ProgramData\chocolatey\bin\ant.bat C:\ProgramData\chocolatey\bin\ant.exe -} - - -$jce_indicator = "$target\README.txt" -# Install Java Cryptographic Extensions, needed for SSL. -If (!(Test-Path $jce_indicator)) { - $zip = "C:\Users\appveyor\jce_policy-$($env:java_version).zip" - $target = "$($env:JAVA_HOME)\jre\lib\security" - # If this file doesn't exist we know JCE hasn't been installed. 
- $url = "https://www.dropbox.com/s/po4308hlwulpvep/UnlimitedJCEPolicyJDK7.zip?dl=1" - $extract_folder = "UnlimitedJCEPolicy" - If ($env:java_version -eq "1.8.0") { - $url = "https://www.dropbox.com/s/al1e6e92cjdv7m7/jce_policy-8.zip?dl=1" - $extract_folder = "UnlimitedJCEPolicyJDK8" - } - # Download zip to staging area if it doesn't exist, we do this because - # we extract it to the directory based on the platform and we want to cache - # this file so it can apply to all platforms. - if(!(Test-Path $zip)) { - (new-object System.Net.WebClient).DownloadFile($url, $zip) - } - - Add-Type -AssemblyName System.IO.Compression.FileSystem - [System.IO.Compression.ZipFile]::ExtractToDirectory($zip, $target) - - $jcePolicyDir = "$target\$extract_folder" - Move-Item $jcePolicyDir\* $target\ -force - Remove-Item $jcePolicyDir -} - -# Download simulacron -$simulacron_url = "https://github.com/datastax/simulacron/releases/download/0.7.0/simulacron-standalone-0.7.0.jar" -$simulacron_jar = $env:SIMULACRON_JAR -if(!(Test-Path $simulacron_jar)) { - (new-object System.Net.WebClient).DownloadFile($simulacron_url, $simulacron_jar) -} - -# Install Python Dependencies for CCM. -Start-Process python -ArgumentList "-m pip install psutil pyYaml six numpy" -Wait -NoNewWindow - -# Clone ccm from git and use master. -If (!(Test-Path $env:CCM_PATH)) { - Start-Process git -ArgumentList "clone -b cassandra-test https://github.com/pcmanus/ccm.git $($env:CCM_PATH)" -Wait -NoNewWindow -} - - -# Copy ccm -> ccm.py so windows knows to run it. -If (!(Test-Path $env:CCM_PATH\ccm.py)) { - Copy-Item "$env:CCM_PATH\ccm" "$env:CCM_PATH\ccm.py" -} - -$env:PYTHONPATH="$($env:CCM_PATH);$($env:PYTHONPATH)" -$env:PATH="$($env:CCM_PATH);$($env:PATH)" - -# Predownload cassandra version for CCM if it isn't already downloaded. 
-# This is necessary because otherwise ccm fails -If (!(Test-Path C:\Users\appveyor\.ccm\repository\$env:cassandra_version)) { - Start-Process python -ArgumentList "$($env:CCM_PATH)\ccm.py create -v $($env:cassandra_version) -n 1 predownload" -Wait -NoNewWindow - echo "Checking status of download" - python $env:CCM_PATH\ccm.py status - Start-Process python -ArgumentList "$($env:CCM_PATH)\ccm.py remove predownload" -Wait -NoNewWindow - echo "Downloaded version $env:cassandra_version" -} - -Start-Process python -ArgumentList "-m pip install -r test-requirements.txt" -Wait -NoNewWindow -Start-Process python -ArgumentList "-m pip install nose-ignore-docstring" -Wait -NoNewWindow diff --git a/appveyor/run_test.ps1 b/appveyor/run_test.ps1 deleted file mode 100644 index fc95ec7e52..0000000000 --- a/appveyor/run_test.ps1 +++ /dev/null @@ -1,49 +0,0 @@ -Set-ExecutionPolicy Unrestricted -Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Scope Process -force -Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Scope CurrentUser -force -Get-ExecutionPolicy -List -echo $env:Path -echo "JAVA_HOME: $env:JAVA_HOME" -echo "PYTHONPATH: $env:PYTHONPATH" -echo "Cassandra version: $env:CASSANDRA_VERSION" -echo "Simulacron jar: $env:SIMULACRON_JAR" -echo $env:ci_type -python --version -python -c "import platform; print(platform.architecture())" - -$wc = New-Object 'System.Net.WebClient' - -if($env:ci_type -eq 'unit'){ - echo "Running Unit tests" - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit - - $env:EVENT_LOOP_MANAGER="gevent" - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit\io\test_geventreactor.py - $env:EVENT_LOOP_MANAGER="eventlet" - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml .\tests\unit\io\test_eventletreactor.py - $env:EVENT_LOOP_MANAGER="asyncore" - - echo "uploading unit results" - $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\unit_results.xml)) - -} - -if($env:ci_type -eq 'standard'){ - - echo "Running CQLEngine integration tests" - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=cqlengine_results.xml .\tests\integration\cqlengine - $cqlengine_tests_result = $lastexitcode - $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\cqlengine_results.xml)) - echo "uploading CQLEngine test results" - - echo "Running standard integration tests" - nosetests -s -v --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml .\tests\integration\standard - $integration_tests_result = $lastexitcode - $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\standard_results.xml)) - echo "uploading standard integration test results" -} - - -$exit_result = $unit_tests_result + $cqlengine_tests_result + $integration_tests_result + $simulacron_tests_result -echo "Exit result: $exit_result" -exit $exit_result diff --git a/build.yaml.bak b/build.yaml.bak deleted file mode 100644 index 100c86558a..0000000000 --- a/build.yaml.bak +++ /dev/null @@ -1,264 +0,0 @@ -schedules: - nightly_master: - schedule: nightly - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='libev' - matrix: - exclude: - - python: [3.6, 3.7, 3.8] - - cassandra: ['2.1', '3.0', '4.0', 'test-dse'] - - commit_long_test: - schedule: per_commit - disable_pull_requests: true - branches: - 
include: [/long-python.*/] - env_vars: | - EVENT_LOOP_MANAGER='libev' - matrix: - exclude: - - python: [3.6, 3.7, 3.8] - - cassandra: ['2.1', '3.0', 'test-dse'] - - commit_branches: - schedule: per_commit - disable_pull_requests: true - branches: - include: [/python.*/] - env_vars: | - EVENT_LOOP_MANAGER='libev' - EXCLUDE_LONG=1 - matrix: - exclude: - - python: [3.6, 3.7, 3.8] - - cassandra: ['2.1', '3.0', 'test-dse'] - - commit_branches_dev: - schedule: per_commit - disable_pull_requests: true - branches: - include: [/dev-python.*/] - env_vars: | - EVENT_LOOP_MANAGER='libev' - EXCLUDE_LONG=1 - matrix: - exclude: - - python: [2.7, 3.7, 3.6, 3.8] - - cassandra: ['2.0', '2.1', '2.2', '3.0', '4.0', 'test-dse', 'dse-4.8', 'dse-5.0', 'dse-6.0', 'dse-6.8'] - - release_test: - schedule: per_commit - disable_pull_requests: true - branches: - include: [/release-.+/] - env_vars: | - EVENT_LOOP_MANAGER='libev' - - weekly_master: - schedule: 0 10 * * 6 - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='libev' - matrix: - exclude: - - python: [3.5] - - cassandra: ['2.2', '3.1'] - - weekly_gevent: - schedule: 0 14 * * 6 - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='gevent' - JUST_EVENT_LOOP=1 - - weekly_eventlet: - schedule: 0 18 * * 6 - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='eventlet' - JUST_EVENT_LOOP=1 - - weekly_asyncio: - schedule: 0 22 * * 6 - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='asyncio' - JUST_EVENT_LOOP=1 - matrix: - exclude: - - python: [2.7] - - weekly_async: - schedule: 0 10 * * 7 - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='asyncore' - JUST_EVENT_LOOP=1 - - weekly_twister: - schedule: 0 14 * * 7 - disable_pull_requests: true - branches: - include: [master] - env_vars: | - EVENT_LOOP_MANAGER='twisted' - JUST_EVENT_LOOP=1 - - upgrade_tests: - schedule: adhoc - branches: - include: [master, python-546] - env_vars: | - EVENT_LOOP_MANAGER='libev' - JUST_UPGRADE=True - matrix: - exclude: - - python: [3.6, 3.7, 3.8] - - cassandra: ['2.0', '2.1', '2.2', '3.0', '4.0', 'test-dse'] - -python: - - 2.7 - - 3.5 - - 3.6 - - 3.7 - - 3.8 - -os: - - ubuntu/bionic64/python-driver - -cassandra: - - '2.1' - - '2.2' - - '3.0' - - '3.11' - - '4.0' - - 'dse-4.8' - - 'dse-5.0' - - 'dse-5.1' - - 'dse-6.0' - - 'dse-6.7' - - 'dse-6.8.0' - -env: - CYTHON: - - CYTHON - - NO_CYTHON - -build: - - script: | - export JAVA_HOME=$CCM_JAVA_HOME - export PATH=$JAVA_HOME/bin:$PATH - export PYTHONPATH="" - export CCM_MAX_HEAP_SIZE=1024M - - # Required for unix socket tests - sudo apt-get install socat - - # Install latest setuptools - pip install --upgrade pip - pip install -U setuptools - - pip install git+ssh://git@github.com/riptano/ccm-private.git@cassandra-7544-native-ports-with-dse-fix - - #pip install $HOME/ccm - - if [ -n "$CCM_IS_DSE" ]; then - pip install -r test-datastax-requirements.txt - else - pip install -r test-requirements.txt - fi - - pip install nose-ignore-docstring - pip install nose-exclude - pip install service_identity - - FORCE_CYTHON=False - if [[ $CYTHON == 'CYTHON' ]]; then - FORCE_CYTHON=True - pip install cython - pip install numpy - # Install the driver & compile C extensions - python setup.py build_ext --inplace - else - # Install the driver & compile C extensions with no cython - python setup.py build_ext --inplace --no-cython - fi - 
- echo "JUST_UPGRADE: $JUST_UPGRADE" - if [[ $JUST_UPGRADE == 'True' ]]; then - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=upgrade_results.xml tests/integration/upgrade || true - exit 0 - fi - - if [[ $JUST_SMOKE == 'true' ]]; then - # When we ONLY want to run the smoke tests - echo "JUST_SMOKE: $JUST_SMOKE" - echo "==========RUNNING SMOKE TESTS===========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION DSE_VERSION='6.7.0' MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true - exit 0 - fi - - # Run the unit tests, this is not done in travis because - # it takes too much time for the whole matrix to build with cython - if [[ $CYTHON == 'CYTHON' ]]; then - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_results.xml tests/unit/ || true - EVENT_LOOP_MANAGER=eventlet VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_eventlet_results.xml tests/unit/io/test_eventletreactor.py || true - EVENT_LOOP_MANAGER=gevent VERIFY_CYTHON=1 nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=unit_gevent_results.xml tests/unit/io/test_geventreactor.py || true - fi - - if [ -n "$JUST_EVENT_LOOP" ]; then - echo "Running integration event loop subset with $EVENT_LOOP_MANAGER" - EVENT_LOOP_TESTS=( - "tests/integration/standard/test_cluster.py" - "tests/integration/standard/test_concurrent.py" - "tests/integration/standard/test_connection.py" - "tests/integration/standard/test_control_connection.py" - "tests/integration/standard/test_metrics.py" - "tests/integration/standard/test_query.py" - "tests/integration/simulacron/test_endpoint.py" - "tests/integration/long/test_ssl.py" - ) - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml ${EVENT_LOOP_TESTS[@]} || true - exit 0 - fi - - echo "Running with event loop manager: $EVENT_LOOP_MANAGER" - echo "==========RUNNING SIMULACRON TESTS==========" - SIMULACRON_JAR="$HOME/simulacron.jar" - SIMULACRON_JAR=$SIMULACRON_JAR EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_DIR=$CCM_INSTALL_DIR CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=simulacron_results.xml tests/integration/simulacron/ || true - - echo "Running with event loop manager: $EVENT_LOOP_MANAGER" - echo "==========RUNNING CQLENGINE TESTS==========" - 
EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=cqle_results.xml tests/integration/cqlengine/ || true - - echo "==========RUNNING INTEGRATION TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/ || true - - if [ -n "$DSE_VERSION" ] && ! [[ $DSE_VERSION == "4.8"* ]]; then - echo "==========RUNNING DSE INTEGRATION TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CASSANDRA_DIR=$CCM_INSTALL_DIR DSE_VERSION=$DSE_VERSION ADS_HOME=$HOME/ VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=dse_results.xml tests/integration/advanced/ || true - fi - - echo "==========RUNNING CLOUD TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CLOUD_PROXY_PATH="$HOME/proxy/" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=advanced_results.xml tests/integration/cloud/ || true - - if [ -z "$EXCLUDE_LONG" ]; then - echo "==========RUNNING LONG INTEGRATION TESTS==========" - EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" DSE_VERSION=$DSE_VERSION CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --exclude-dir=tests/integration/long/upgrade --with-ignore-docstrings --with-xunit --xunit-file=long_results.xml tests/integration/long/ || true - fi - - - xunit: - - "*_results.xml" diff --git a/ci/install_openssl.sh b/ci/install_openssl.sh deleted file mode 100755 index 4545cb0d68..0000000000 --- a/ci/install_openssl.sh +++ /dev/null @@ -1,22 +0,0 @@ -#! 
/bin/bash -e - -echo "Download and build openssl==1.1.1f" -cd /usr/src -if [[ -f openssl-1.1.1f.tar.gz ]]; then - exit 0 -fi -wget -q https://www.openssl.org/source/openssl-1.1.1f.tar.gz -if [[ -d openssl-1.1.1f ]]; then - exit 0 -fi - -tar -zxf openssl-1.1.1f.tar.gz -cd openssl-1.1.1f -./config -make -s -j2 -make install > /dev/null - -set +e -mv -f /usr/bin/openssl /root/ -mv -f /usr/bin64/openssl /root/ -ln -s /usr/local/ssl/bin/openssl /usr/bin/openssl diff --git a/docs.yaml b/docs.yaml deleted file mode 100644 index 8e29b942e3..0000000000 --- a/docs.yaml +++ /dev/null @@ -1,75 +0,0 @@ -title: DataStax Python Driver -summary: DataStax Python Driver for Apache Cassandra® -output: docs/_build/ -swiftype_drivers: pythondrivers -checks: - external_links: - exclude: - - 'http://aka.ms/vcpython27' -sections: - - title: N/A - prefix: / - type: sphinx - directory: docs - virtualenv_init: | - set -x - CASS_DRIVER_NO_CYTHON=1 pip install -r test-datastax-requirements.txt - # for newer versions this is redundant, but in older versions we need to - # install, e.g., the cassandra driver, and those versions don't specify - # the cassandra driver version in requirements files - CASS_DRIVER_NO_CYTHON=1 python setup.py develop - pip install "jinja2==2.8.1;python_version<'3.6'" "sphinx>=1.3,<2" geomet - # build extensions like libev - CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force -versions: - - name: '3.25' - ref: a83c36a5 - - name: '3.24' - ref: 21cac12b - - name: '3.23' - ref: a40a2af7 - - name: '3.22' - ref: 1ccd5b99 - - name: '3.21' - ref: 5589d96b - - name: '3.20' - ref: d30d166f - - name: '3.19' - ref: ac2471f9 - - name: '3.18' - ref: ec36b957 - - name: '3.17' - ref: 38e359e1 - - name: '3.16' - ref: '3.16.0' - - name: '3.15' - ref: '2ce0bd97' - - name: '3.14' - ref: '9af8bd19' - - name: '3.13' - ref: '3.13.0' - - name: '3.12' - ref: '43b9c995' - - name: '3.11' - ref: '3.11.0' - - name: '3.10' - ref: 64572368 - - name: 3.9 - ref: 3.9-doc - - name: 3.8 - ref: 3.8-doc - - name: 3.7 - ref: 3.7-doc - - name: 3.6 - ref: 3.6-doc - - name: 3.5 - ref: 3.5-doc -redirects: - - \A\/(.*)/\Z: /\1.html -rewrites: - - search: cassandra.apache.org/doc/cql3/CQL.html - replace: cassandra.apache.org/doc/cql3/CQL-3.0.html - - search: http://www.datastax.com/documentation/cql/3.1/ - replace: https://docs.datastax.com/en/archived/cql/3.1/ - - search: http://www.datastax.com/docs/1.2/cql_cli/cql/BATCH - replace: https://docs.datastax.com/en/dse/6.7/cql/cql/cql_reference/cql_commands/cqlBatch.html diff --git a/doxyfile b/doxyfile deleted file mode 100644 index d453557e22..0000000000 --- a/doxyfile +++ /dev/null @@ -1,2339 +0,0 @@ -# Doxyfile 1.8.8 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project. -# -# All text after a double hash (##) is considered a comment and is placed in -# front of the TAG it is preceding. -# -# All text after a single hash (#) is considered a comment and will be ignored. -# The format is: -# TAG = value [value, ...] -# For lists, items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (\" \"). - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the config file -# that follow. 
The default is UTF-8 which is also the encoding used for all text -# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv -# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv -# for the list of possible encodings. -# The default value is: UTF-8. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by -# double-quotes, unless you are using Doxywizard) that should identify the -# project for which the documentation is generated. This name is used in the -# title of most generated pages and in a few other places. -# The default value is: My Project. - -PROJECT_NAME = "Python Driver" - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. This -# could be handy for archiving the generated documentation or if some version -# control system is used. - -PROJECT_NUMBER = - -# Using the PROJECT_BRIEF tag one can provide an optional one line description -# for a project that appears at the top of each page and should give viewer a -# quick idea about the purpose of the project. Keep the description short. - -PROJECT_BRIEF = - -# With the PROJECT_LOGO tag one can specify an logo or icon that is included in -# the documentation. The maximum height of the logo should not exceed 55 pixels -# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo -# to the output directory. - -PROJECT_LOGO = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path -# into which the generated documentation will be written. If a relative path is -# entered, it will be relative to the location where doxygen was started. If -# left blank the current directory will be used. - -OUTPUT_DIRECTORY = - -# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub- -# directories (in 2 levels) under the output directory of each output format and -# will distribute the generated files over these directories. Enabling this -# option can be useful when feeding doxygen a huge amount of source files, where -# putting all generated files in the same directory would otherwise causes -# performance problems for the file system. -# The default value is: NO. - -CREATE_SUBDIRS = NO - -# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII -# characters to appear in the names of generated files. If set to NO, non-ASCII -# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode -# U+3044. -# The default value is: NO. - -ALLOW_UNICODE_NAMES = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, -# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), -# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, -# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), -# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, -# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, -# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, -# Ukrainian and Vietnamese. -# The default value is: English. 
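Since doxygen is driven entirely by this file, a quick way to see which of the tags below were customized for the driver is to diff against a freshly generated template; a minimal sketch, assuming doxygen is on the PATH and using a scratch file name:

    # Generate a fully annotated default config, compare it with the repo's
    # customized doxyfile, then run the build that this file describes.
    doxygen -g doxyfile.template
    diff doxyfile.template doxyfile
    doxygen doxyfile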
- -OUTPUT_LANGUAGE = English - -# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member -# descriptions after the members that are listed in the file and class -# documentation (similar to Javadoc). Set to NO to disable this. -# The default value is: YES. - -BRIEF_MEMBER_DESC = NO - -# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief -# description of a member or function before the detailed description -# -# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. -# The default value is: YES. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator that is -# used to form the text in various listings. Each string in this list, if found -# as the leading text of the brief description, will be stripped from the text -# and the result, after processing the whole list, is used as the annotated -# text. Otherwise, the brief description is used as-is. If left blank, the -# following values are used ($name is automatically replaced with the name of -# the entity):The $name class, The $name widget, The $name file, is, provides, -# specifies, contains, represents, a, an and the. - -ABBREVIATE_BRIEF = - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# doxygen will generate a detailed section even if there is only a brief -# description. -# The default value is: NO. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. -# The default value is: NO. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path -# before files name in the file list and in the header files. If set to NO the -# shortest path that makes the file name unique will be used -# The default value is: YES. - -FULL_PATH_NAMES = NO - -# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. -# Stripping is only done if one of the specified strings matches the left-hand -# part of the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the path to -# strip. -# -# Note that you can specify absolute paths here, but also relative paths, which -# will be relative from the directory where doxygen is started. -# This tag requires that the tag FULL_PATH_NAMES is set to YES. - -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the -# path mentioned in the documentation of a class, which tells the reader which -# header file to include in order to use a class. If left blank only the name of -# the header file containing the class definition is used. Otherwise one should -# specify the list of include paths that are normally passed to the compiler -# using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but -# less readable) file names. This can be useful is your file systems doesn't -# support long names like on DOS, Mac, or CD-ROM. -# The default value is: NO. 
- -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the -# first line (until the first dot) of a Javadoc-style comment as the brief -# description. If set to NO, the Javadoc-style will behave just like regular Qt- -# style comments (thus requiring an explicit @brief command for a brief -# description.) -# The default value is: NO. - -JAVADOC_AUTOBRIEF = NO - -# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first -# line (until the first dot) of a Qt-style comment as the brief description. If -# set to NO, the Qt-style will behave just like regular Qt-style comments (thus -# requiring an explicit \brief command for a brief description.) -# The default value is: NO. - -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a -# multi-line C++ special comment block (i.e. a block of //! or /// comments) as -# a brief description. This used to be the default behavior. The new default is -# to treat a multi-line C++ comment block as a detailed description. Set this -# tag to YES if you prefer the old behavior instead. -# -# Note that setting this tag to YES also means that rational rose comments are -# not recognized any more. -# The default value is: NO. - -MULTILINE_CPP_IS_BRIEF = NO - -# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the -# documentation from any documented member that it re-implements. -# The default value is: YES. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a -# new page for each member. If set to NO, the documentation of a member will be -# part of the file/class/namespace that contains it. -# The default value is: NO. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen -# uses this value to replace tabs by spaces in code fragments. -# Minimum value: 1, maximum value: 16, default value: 4. - -TAB_SIZE = 4 - -# This tag can be used to specify a number of aliases that act as commands in -# the documentation. An alias has the form: -# name=value -# For example adding -# "sideeffect=@par Side Effects:\n" -# will allow you to put the command \sideeffect (or @sideeffect) in the -# documentation, which will result in a user-defined paragraph with heading -# "Side Effects:". You can put \n's in the value part of an alias to insert -# newlines. - -ALIASES = "test_assumptions=\par Test Assumptions\n" \ - "note=\par Note\n" \ - "test_category=\par Test Category\n" \ - "jira_ticket=\par JIRA Ticket\n" \ - "expected_result=\par Expected Result\n" \ - "since=\par Since\n" \ - "param=\par Parameters\n" \ - "return=\par Return\n" \ - "expected_errors=\par Expected Errors\n" - -# This tag can be used to specify a number of word-keyword mappings (TCL only). -# A mapping has the form "name=value". For example adding "class=itcl::class" -# will allow you to use the command class in the itcl::class meaning. - -TCL_SUBST = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. For -# instance, some of the names that are used will be different. The list of all -# members will be omitted, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or -# Python sources only. Doxygen will then generate output that is more tailored -# for that language. 
For instance, namespaces will be presented as packages, -# qualified scopes will look different, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_JAVA = YES - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources. Doxygen will then generate output that is tailored for Fortran. -# The default value is: NO. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for VHDL. -# The default value is: NO. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given -# extension. Doxygen has a built-in mapping, but you can override or extend it -# using this tag. The format is ext=language, where ext is a file extension, and -# language is one of the parsers supported by doxygen: IDL, Java, Javascript, -# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: -# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: -# Fortran. In the later case the parser tries to guess whether the code is fixed -# or free formatted code, this is the default for Fortran type files), VHDL. For -# instance to make doxygen treat .inc files as Fortran files (default is PHP), -# and .f files as C (default is Fortran), use: inc=Fortran f=C. -# -# Note For files without extension you can use no_extension as a placeholder. -# -# Note that for custom extensions you also need to set FILE_PATTERNS otherwise -# the files are not read by doxygen. - -EXTENSION_MAPPING = - -# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments -# according to the Markdown format, which allows for more readable -# documentation. See http://daringfireball.net/projects/markdown/ for details. -# The output of markdown processing is further processed by doxygen, so you can -# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in -# case of backward compatibilities issues. -# The default value is: YES. - -MARKDOWN_SUPPORT = YES - -# When enabled doxygen tries to link words that correspond to documented -# classes, or namespaces to their corresponding documentation. Such a link can -# be prevented in individual cases by by putting a % sign in front of the word -# or globally by setting AUTOLINK_SUPPORT to NO. -# The default value is: YES. - -AUTOLINK_SUPPORT = YES - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should set this -# tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); -# versus func(std::string) {}). This also make the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. -# The default value is: NO. - -BUILTIN_STL_SUPPORT = NO - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. -# The default value is: NO. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: -# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen -# will parse them like normal C++ but will assume all classes use public instead -# of private inheritance when no explicit protection keyword is present. -# The default value is: NO. 
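The ALIASES block above is what gives the driver's test docstrings structured metadata: each custom command expands to a \par heading in the generated docs. A hypothetical test module using them (the ticket reference and names are illustrative, not taken from the repo):

    # Write a sample module; with doxypy as the input filter (configured
    # further down), each @alias in the docstring becomes a \par heading.
    cat > /tmp/alias_demo.py <<'EOF'
    def test_paging_example():
        """
        Illustrative test docstring using the custom aliases.

        @jira_ticket PYTHON-XXXX
        @test_category queries:paging
        @expected_result rows are returned across all pages
        """
    EOF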
- -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate -# getter and setter methods for a property. Setting this option to YES will make -# doxygen to replace the get and set methods by a property in the documentation. -# This will only work if the methods are indeed getting or setting a simple -# type. If this is not the case, or you want to show the methods anyway, you -# should set this option to NO. -# The default value is: YES. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES, then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. -# The default value is: NO. - -DISTRIBUTE_GROUP_DOC = NO - -# Set the SUBGROUPING tag to YES to allow class member groups of the same type -# (for instance a group of public functions) to be put as a subgroup of that -# type (e.g. under the Public Functions section). Set it to NO to prevent -# subgrouping. Alternatively, this can be done per class using the -# \nosubgrouping command. -# The default value is: YES. - -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions -# are shown inside the group in which they are included (e.g. using \ingroup) -# instead of on a separate page (for HTML and Man pages) or section (for LaTeX -# and RTF). -# -# Note that this feature does not work in combination with -# SEPARATE_MEMBER_PAGES. -# The default value is: NO. - -INLINE_GROUPED_CLASSES = NO - -# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions -# with only public data fields or simple typedef fields will be shown inline in -# the documentation of the scope in which they are defined (i.e. file, -# namespace, or group documentation), provided this scope is documented. If set -# to NO, structs, classes, and unions are shown on a separate page (for HTML and -# Man pages) or section (for LaTeX and RTF). -# The default value is: NO. - -INLINE_SIMPLE_STRUCTS = NO - -# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or -# enum is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically be -# useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. -# The default value is: NO. - -TYPEDEF_HIDES_STRUCT = NO - -# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This -# cache is used to resolve symbols given their name and scope. Since this can be -# an expensive process and often the same symbol appears multiple times in the -# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small -# doxygen will become slower. If the cache is too large, memory is wasted. The -# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range -# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 -# symbols. At the end of a run doxygen will report the cache usage and suggest -# the optimal cache size from a speed point of view. -# Minimum value: 0, maximum value: 9, default value: 0. 
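Per the note above, doxygen reports lookup-cache usage at the end of a run and suggests a better size, so this tag can be tuned empirically; a sketch (the grep pattern matches the statistics line, whose exact wording varies by doxygen version):

    # Build once and extract the cache statistics doxygen prints on exit;
    # raise LOOKUP_CACHE_SIZE if the cache is reported as saturated.
    doxygen doxyfile 2>&1 | grep -i 'lookup cache'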
- -LOOKUP_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in -# documentation are documented, even if no documentation was available. Private -# class members and static file members will be hidden unless the -# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. -# Note: This will also disable the warnings about undocumented members that are -# normally produced when WARNINGS is set to YES. -# The default value is: NO. - -EXTRACT_ALL = NO - -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will -# be included in the documentation. -# The default value is: NO. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal -# scope will be included in the documentation. -# The default value is: NO. - -EXTRACT_PACKAGE = NO - -# If the EXTRACT_STATIC tag is set to YES all static members of a file will be -# included in the documentation. -# The default value is: NO. - -EXTRACT_STATIC = NO - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined -# locally in source files will be included in the documentation. If set to NO -# only classes defined in header files are included. Does not have any effect -# for Java sources. -# The default value is: YES. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. When set to YES local methods, -# which are defined in the implementation section but not in the interface are -# included in the documentation. If set to NO only methods in the interface are -# included. -# The default value is: NO. - -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base name of -# the file that contains the anonymous namespace. By default anonymous namespace -# are hidden. -# The default value is: NO. - -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all -# undocumented members inside documented classes or files. If set to NO these -# members will be included in the various overviews, but no documentation -# section is generated. This option has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. If set -# to NO these classes will be included in the various overviews. This option has -# no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# (class|struct|union) declarations. If set to NO these declarations will be -# included in the documentation. -# The default value is: NO. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any -# documentation blocks found inside the body of a function. If set to NO these -# blocks will be appended to the function's detailed documentation block. -# The default value is: NO. 
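Because EXTRACT_ALL stays at NO here, undocumented members still trigger warnings (see the EXTRACT_ALL note above), which makes it easy to audit documentation coverage instead of flipping the tag. A sketch, with the warning wording approximated:

    # Collect doxygen's warnings and rank the files with the most
    # undocumented entities.
    doxygen doxyfile 2> doxygen_warnings.log
    grep -i 'not documented' doxygen_warnings.log | cut -d: -f1 | sort | uniq -c | sort -rn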
- -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation that is typed after a -# \internal command is included. If the tag is set to NO then the documentation -# will be excluded. Set it to YES to include the internal documentation. -# The default value is: NO. - -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. -# The default value is: system dependent. - -CASE_SENSE_NAMES = YES - -# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with -# their full class and namespace scopes in the documentation. If set to YES the -# scope will be hidden. -# The default value is: NO. - -HIDE_SCOPE_NAMES = NO - -# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of -# the files that are included by a file in the documentation of that file. -# The default value is: YES. - -SHOW_INCLUDE_FILES = YES - -# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each -# grouped member an include statement to the documentation, telling the reader -# which file to include in order to use the member. -# The default value is: NO. - -SHOW_GROUPED_MEMB_INC = NO - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include -# files with double quotes in the documentation rather than with sharp brackets. -# The default value is: NO. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the -# documentation for inline members. -# The default value is: YES. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the -# (detailed) documentation of file and class members alphabetically by member -# name. If set to NO the members will appear in declaration order. -# The default value is: YES. - -SORT_MEMBER_DOCS = YES - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief -# descriptions of file, namespace and class members alphabetically by member -# name. If set to NO the members will appear in declaration order. Note that -# this will also influence the order of the classes in the class list. -# The default value is: NO. - -SORT_BRIEF_DOCS = NO - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the -# (brief and detailed) documentation of class members so that constructors and -# destructors are listed first. If set to NO the constructors will appear in the -# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. -# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief -# member documentation. -# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting -# detailed member documentation. -# The default value is: NO. - -SORT_MEMBERS_CTORS_1ST = NO - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy -# of group names into alphabetical order. If set to NO the group names will -# appear in their defined order. -# The default value is: NO. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by -# fully-qualified names, including namespaces. 
If set to NO, the class list will -# be sorted only by class name, not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the alphabetical -# list. -# The default value is: NO. - -SORT_BY_SCOPE_NAME = NO - -# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper -# type resolution of all parameters of a function it will reject a match between -# the prototype and the implementation of a member function even if there is -# only one candidate or it is obvious which candidate to choose by doing a -# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still -# accept a match between prototype and implementation in such cases. -# The default value is: NO. - -STRICT_PROTO_MATCHING = NO - -# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the -# todo list. This list is created by putting \todo commands in the -# documentation. -# The default value is: YES. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the -# test list. This list is created by putting \test commands in the -# documentation. -# The default value is: YES. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug -# list. This list is created by putting \bug commands in the documentation. -# The default value is: YES. - -GENERATE_BUGLIST = YES - -# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO) -# the deprecated list. This list is created by putting \deprecated commands in -# the documentation. -# The default value is: YES. - -GENERATE_DEPRECATEDLIST= YES - -# The ENABLED_SECTIONS tag can be used to enable conditional documentation -# sections, marked by \if ... \endif and \cond -# ... \endcond blocks. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the -# initial value of a variable or macro / define can have for it to appear in the -# documentation. If the initializer consists of more lines than specified here -# it will be hidden. Use a value of 0 to hide initializers completely. The -# appearance of the value of individual variables and macros / defines can be -# controlled using \showinitializer or \hideinitializer command in the -# documentation regardless of this setting. -# Minimum value: 0, maximum value: 10000, default value: 30. - -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at -# the bottom of the documentation of classes and structs. If set to YES the list -# will mention the files that were used to generate the documentation. -# The default value is: YES. - -SHOW_USED_FILES = YES - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This -# will remove the Files entry from the Quick Index and from the Folder Tree View -# (if specified). -# The default value is: YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces -# page. This will remove the Namespaces entry from the Quick Index and from the -# Folder Tree View (if specified). -# The default value is: YES. - -SHOW_NAMESPACES = YES - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). 
Doxygen will invoke the program by executing (via -# popen()) the command <command> <input-file>, where <command> is the value of the -# FILE_VERSION_FILTER tag, and <input-file> is the name of an input file provided -# by doxygen. Whatever the program writes to standard output is used as the file -# version. For an example see the documentation. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. To create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. You can -# optionally specify a file name after the option, if omitted DoxygenLayout.xml -# will be used as the name of the layout file. -# -# Note that if you run doxygen from a directory containing a file called -# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE -# tag is left empty. - -LAYOUT_FILE = - -# The CITE_BIB_FILES tag can be used to specify one or more bib files containing -# the reference definitions. This must be a list of .bib files. The .bib -# extension is automatically appended if omitted. This requires the bibtex tool -# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. -# For LaTeX the style of the bibliography can be controlled using -# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the -# search path. See also \cite for info how to create references. - -CITE_BIB_FILES = - -#--------------------------------------------------------------------------- -# Configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated to -# standard output by doxygen. If QUIET is set to YES this implies that the -# messages are off. -# The default value is: NO. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES -# this implies that the warnings are on. -# -# Tip: Turn warnings on while writing the documentation. -# The default value is: YES. - -WARNINGS = YES - -# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate -# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag -# will automatically be disabled. -# The default value is: YES. - -WARN_IF_UNDOCUMENTED = YES - -# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some parameters -# in a documented function, or documenting parameters that don't exist or using -# markup commands wrongly. -# The default value is: YES. - -WARN_IF_DOC_ERROR = YES - -# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that -# are documented, but have no documentation for their parameters or return -# value. If set to NO doxygen will only warn about wrong or incomplete parameter -# documentation, but not about the absence of documentation. -# The default value is: NO. - -WARN_NO_PARAMDOC = NO - -# The WARN_FORMAT tag determines the format of the warning messages that doxygen -# can produce. The string should contain the $file, $line, and $text tags, which -# will be replaced by the file and line number from which the warning originated -# and the warning text.
Optionally the format may contain $version, which will -# be replaced by the version of the file (if it could be obtained via -# FILE_VERSION_FILTER) -# The default value is: $file:$line: $text. - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning and error -# messages should be written. If left blank the output is written to standard -# error (stderr). - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# Configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag is used to specify the files and/or directories that contain -# documented source files. You may enter file names like myfile.cpp or -# directories like /usr/src/myproject. Separate the files or directories with -# spaces. -# Note: If this tag is empty the current directory is searched. - -INPUT = ./tests - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses -# libiconv (or the iconv built into libc) for the transcoding. See the libiconv -# documentation (see: http://www.gnu.org/software/libiconv) for the list of -# possible encodings. -# The default value is: UTF-8. - -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank the -# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, -# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, -# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, -# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, -# *.qsf, *.as and *.js. - -FILE_PATTERNS = *.py - -# The RECURSIVE tag can be used to specify whether or not subdirectories should -# be searched for input files as well. -# The default value is: NO. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should be -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -# -# Note that relative paths are relative to the directory from which doxygen is -# run. - -EXCLUDE = - -# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. -# The default value is: NO. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories for example use the pattern */test/* - -EXCLUDE_PATTERNS = - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. 
Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories use the pattern */test/* - -EXCLUDE_SYMBOLS = @Test - -# The EXAMPLE_PATH tag can be used to specify one or more files or directories -# that contain example code fragments that are included (see the \include -# command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank all -# files are included. - -EXAMPLE_PATTERNS = - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude commands -# irrespective of the value of the RECURSIVE tag. -# The default value is: NO. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or directories -# that contain images that are to be included in the documentation (see the -# \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command: -# -# <filter> <input-file> -# -# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the -# name of an input file. Doxygen will then use the output that the filter -# program writes to standard output. If FILTER_PATTERNS is specified, this tag -# will be ignored. -# -# Note that the filter must not add or remove lines; it is applied before the -# code is scanned, but not when the output code is generated. If lines are added -# or removed, the anchors will not be placed correctly. - -INPUT_FILTER = "python /usr/local/bin/doxypy.py" - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. The filters are a list of the form: pattern=filter -# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how -# filters are used. If the FILTER_PATTERNS tag is empty or if none of the -# patterns match the file name, INPUT_FILTER is applied. - -FILTER_PATTERNS = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER ) will also be used to filter the input files that are used for -# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). -# The default value is: NO. - -FILTER_SOURCE_FILES = YES - -# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file -# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and -# it is also possible to disable source filtering for a specific pattern using -# *.ext= (so without naming a filter). -# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. - -FILTER_SOURCE_PATTERNS = - -# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that -# is part of the input, its contents will be placed on the main page -# (index.html). This can be useful if you have a project on for instance GitHub -# and want to reuse the introduction page also for the doxygen output.
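INPUT_FILTER above is the tag that makes a Python codebase digestible to doxygen: doxypy rewrites docstrings into Doxygen comment blocks before parsing. Since the filter is just a program that receives a file name and writes the transformed source to standard output, it can be run by hand to preview exactly what doxygen will parse; the module path here is illustrative:

    # Apply the same filter doxygen would invoke via popen() to one module.
    python /usr/local/bin/doxypy.py tests/integration/standard/test_query.py | head -40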
- -USE_MDFILE_AS_MAINPAGE = - -#--------------------------------------------------------------------------- -# Configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will be -# generated. Documented entities will be cross-referenced with these sources. -# -# Note: To get rid of all source code in the generated output, make sure that -# also VERBATIM_HEADERS is set to NO. -# The default value is: NO. - -SOURCE_BROWSER = NO - -# Setting the INLINE_SOURCES tag to YES will include the body of functions, -# classes and enums directly into the documentation. -# The default value is: NO. - -INLINE_SOURCES = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any -# special comment blocks from generated source code fragments. Normal C, C++ and -# Fortran comments will always remain visible. -# The default value is: YES. - -STRIP_CODE_COMMENTS = YES - -# If the REFERENCED_BY_RELATION tag is set to YES then for each documented -# function all documented functions referencing it will be listed. -# The default value is: NO. - -REFERENCED_BY_RELATION = NO - -# If the REFERENCES_RELATION tag is set to YES then for each documented function -# all documented entities called/used by that function will be listed. -# The default value is: NO. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set -# to YES, then the hyperlinks from functions in REFERENCES_RELATION and -# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will -# link to the documentation. -# The default value is: YES. - -REFERENCES_LINK_SOURCE = YES - -# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the -# source code will show a tooltip with additional information such as prototype, -# brief description and links to the definition and documentation. Since this -# will make the HTML file larger and loading of large files a bit slower, you -# can opt to disable this feature. -# The default value is: YES. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -SOURCE_TOOLTIPS = YES - -# If the USE_HTAGS tag is set to YES then the references to source code will -# point to the HTML generated by the htags(1) tool instead of doxygen built-in -# source browser. The htags tool is part of GNU's global source tagging system -# (see http://www.gnu.org/software/global/global.html). You will need version -# 4.8.6 or higher. -# -# To use it do the following: -# - Install the latest version of global -# - Enable SOURCE_BROWSER and USE_HTAGS in the config file -# - Make sure the INPUT points to the root of the source tree -# - Run doxygen as normal -# -# Doxygen will invoke htags (and that will in turn invoke gtags), so these -# tools must be available from the command line (i.e. in the search path). -# -# The result: instead of the source browser generated by doxygen, the links to -# source code will now point to the output of htags. -# The default value is: NO. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a -# verbatim copy of the header file for each class for which an include is -# specified. Set to NO to disable this. -# See also: Section \class. -# The default value is: YES. 
- -VERBATIM_HEADERS = YES - -#--------------------------------------------------------------------------- -# Configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all -# compounds will be generated. Enable this if the project contains a lot of -# classes, structs, unions or interfaces. -# The default value is: YES. - -ALPHABETICAL_INDEX = YES - -# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in -# which the alphabetical index list will be split. -# Minimum value: 1, maximum value: 20, default value: 5. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all classes will -# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag -# can be used to specify a prefix (or a list of prefixes) that should be ignored -# while generating the index headers. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output -# The default value is: YES. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a -# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of -# it. -# The default directory is: html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_OUTPUT = html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each -# generated HTML page (for example: .htm, .php, .asp). -# The default value is: .html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a user-defined HTML header file for -# each generated HTML page. If the tag is left blank doxygen will generate a -# standard header. -# -# To get valid HTML the header file that includes any scripts and style sheets -# that doxygen needs, which is dependent on the configuration options used (e.g. -# the setting GENERATE_TREEVIEW). It is highly recommended to start with a -# default header using -# doxygen -w html new_header.html new_footer.html new_stylesheet.css -# YourConfigFile -# and then modify the file new_header.html. See also section "Doxygen usage" -# for information on how to generate the default header that doxygen normally -# uses. -# Note: The header is subject to change so you typically have to regenerate the -# default header when upgrading to a newer version of doxygen. For a description -# of the possible markers and block names see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each -# generated HTML page. If the tag is left blank doxygen will generate a standard -# footer. See HTML_HEADER for more information on how to generate a default -# footer and what special commands can be used inside the footer. See also -# section "Doxygen usage" for information on how to generate the default footer -# that doxygen normally uses. 
-# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style -# sheet that is used by each HTML page. It can be used to fine-tune the look of -# the HTML output. If left blank doxygen will generate a default style sheet. -# See also section "Doxygen usage" for information on how to generate the style -# sheet that doxygen normally uses. -# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as -# it is more robust and this tag (HTML_STYLESHEET) will in the future become -# obsolete. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_STYLESHEET = - -# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined -# cascading style sheets that are included after the standard style sheets -# created by doxygen. Using this option one can overrule certain style aspects. -# This is preferred over using HTML_STYLESHEET since it does not replace the -# standard style sheet and is therefor more robust against future updates. -# Doxygen will copy the style sheet files to the output directory. -# Note: The order of the extra stylesheet files is of importance (e.g. the last -# stylesheet in the list overrules the setting of the previous ones in the -# list). For an example see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_STYLESHEET = - -# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the HTML output directory. Note -# that these files will be copied to the base HTML output directory. Use the -# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these -# files. In the HTML_STYLESHEET file, use the file name only. Also note that the -# files will be copied as-is; there are no commands or markers available. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_FILES = - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen -# will adjust the colors in the stylesheet and background images according to -# this color. Hue is specified as an angle on a colorwheel, see -# http://en.wikipedia.org/wiki/Hue for more information. For instance the value -# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 -# purple, and 360 is red again. -# Minimum value: 0, maximum value: 359, default value: 220. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_HUE = 220 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors -# in the HTML output. For a value of 0 the output will use grayscales only. A -# value of 255 will produce the most vivid colors. -# Minimum value: 0, maximum value: 255, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the -# luminance component of the colors in the HTML output. Values below 100 -# gradually make the output lighter, whereas values above 100 make the output -# darker. The value divided by 100 is the actual gamma applied, so 80 represents -# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not -# change the gamma. -# Minimum value: 40, maximum value: 240, default value: 80. -# This tag requires that the tag GENERATE_HTML is set to YES. 
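A convenient way to experiment with the three HTML_COLORSTYLE_* tags without editing the file: doxygen reads its configuration from standard input when the config argument is '-', and later assignments override earlier ones. A sketch using the green hue (120) from the comment above:

    # One-off build with an overridden hue; the base doxyfile is unchanged.
    ( cat doxyfile; echo 'HTML_COLORSTYLE_HUE = 120' ) | doxygen -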
- -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting this -# to NO can help when comparing the output of multiple runs. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_TIMESTAMP = YES - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_DYNAMIC_SECTIONS = NO - -# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries -# shown in the various tree structured indices initially; the user can expand -# and collapse entries dynamically later on. Doxygen will expand the tree to -# such a level that at most the specified number of entries are visible (unless -# a fully collapsed tree already exceeds this amount). So setting the number of -# entries 1 will produce a full collapsed tree by default. 0 is a special value -# representing an infinite number of entries and will result in a full expanded -# tree by default. -# Minimum value: 0, maximum value: 9999, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_INDEX_NUM_ENTRIES = 100 - -# If the GENERATE_DOCSET tag is set to YES, additional index files will be -# generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: http://developer.apple.com/tools/xcode/), introduced with -# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a -# Makefile in the HTML output directory. Running make will produce the docset in -# that directory and running make install will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at -# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_DOCSET = NO - -# This tag determines the name of the docset feed. A documentation feed provides -# an umbrella under which multiple documentation sets from a single provider -# (such as a company or product suite) can be grouped. -# The default value is: Doxygen generated docs. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_FEEDNAME = "Doxygen generated docs" - -# This tag specifies a string that should uniquely identify the documentation -# set bundle. This should be a reverse domain-name style string, e.g. -# com.mycompany.MyDocSet. Doxygen will append .docset to the name. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_BUNDLE_ID = org.doxygen.Project - -# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify -# the documentation publisher. This should be a reverse domain-name style -# string, e.g. com.mycompany.MyDocSet.documentation. -# The default value is: org.doxygen.Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_ID = org.doxygen.Publisher - -# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. -# The default value is: Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. 
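If GENERATE_DOCSET were switched to YES, the workflow described above is make-driven: doxygen drops a Makefile into the HTML output directory (html/ with this configuration). A sketch of the two steps from the note:

    # Build the Xcode docset from the generated HTML, then install it where
    # Xcode looks for docsets at startup.
    make -C html
    make -C html install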
-
-DOCSET_PUBLISHER_NAME  = Publisher
-
-# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
-# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
-# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
-# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
-# Windows.
-#
-# The HTML Help Workshop contains a compiler that can convert all HTML output
-# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
-# files are now used as the Windows 98 help format, and will replace the old
-# Windows help format (.hlp) on all Windows platforms in the future. Compressed
-# HTML files also contain an index, a table of contents, and you can search for
-# words in the documentation. The HTML workshop also contains a viewer for
-# compressed HTML files.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_HTMLHELP      = NO
-
-# The CHM_FILE tag can be used to specify the file name of the resulting .chm
-# file. You can add a path in front of the file if the result should not be
-# written to the html output directory.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-CHM_FILE               =
-
-# The HHC_LOCATION tag can be used to specify the location (absolute path
-# including file name) of the HTML help compiler ( hhc.exe). If non-empty
-# doxygen will try to run the HTML help compiler on the generated index.hhp.
-# The file has to be specified with full path.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-HHC_LOCATION           =
-
-# The GENERATE_CHI flag controls if a separate .chi index file is generated (
-# YES) or that it should be included in the master .chm file ( NO).
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-GENERATE_CHI           = NO
-
-# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
-# and project file content.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-CHM_INDEX_ENCODING     =
-
-# The BINARY_TOC flag controls whether a binary table of contents is generated (
-# YES) or a normal table of contents ( NO) in the .chm file. Furthermore it
-# enables the Previous and Next buttons.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-BINARY_TOC             = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members to
-# the table of contents of the HTML help documentation and to the tree view.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-TOC_EXPAND             = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
-# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
-# (.qch) of the generated HTML documentation.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_QHP           = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
-# the file name of the resulting .qch file. The path specified is relative to
-# the HTML output folder.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QCH_FILE               =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
-# Project output. For more information please see Qt Help Project / Namespace
-# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_NAMESPACE          = org.doxygen.Project
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
-# Help Project output. For more information please see Qt Help Project / Virtual
-# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
-# folders).
-# The default value is: doc.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_VIRTUAL_FOLDER     = doc
-
-# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
-# filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
-# filters).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_CUST_FILTER_NAME   =
-
-# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
-# filters).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_CUST_FILTER_ATTRS  =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's filter section matches. Qt Help Project / Filter Attributes (see:
-# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_SECT_FILTER_ATTRS  =
-
-# The QHG_LOCATION tag can be used to specify the location of Qt's
-# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
-# generated .qhp file.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHG_LOCATION           =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
-# generated, together with the HTML files, they form an Eclipse help plugin. To
-# install this plugin and make it available under the help contents menu in
-# Eclipse, the contents of the directory containing the HTML and XML files needs
-# to be copied into the plugins directory of eclipse. The name of the directory
-# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
-# After copying Eclipse needs to be restarted before the help appears.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_ECLIPSEHELP   = NO
-
-# A unique identifier for the Eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have this
-# name. Each documentation set should have its own identifier.
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
-
-ECLIPSE_DOC_ID         = org.doxygen.Project
-
-# If you want full control over the layout of the generated HTML pages it might
-# be necessary to disable the index and replace it with your own. The
-# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
-# of each HTML page. A value of NO enables the index and the value YES disables
-# it. Since the tabs in the index contain the same information as the navigation
-# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-DISABLE_INDEX          = NO
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information. If the tag
-# value is set to YES, a side panel will be generated containing a tree-like
-# index structure (just like the one that is generated for HTML Help). For this
-# to work a browser that supports JavaScript, DHTML, CSS and frames is required
-# (i.e. any modern browser). Windows users are probably better off using the
-# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
-# further fine-tune the look of the index. As an example, the default style
-# sheet generated by doxygen has an example that shows how to put an image at
-# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
-# the same information as the tab index, you could consider setting
-# DISABLE_INDEX to YES when enabling this option.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_TREEVIEW      = YES
-
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
-# doxygen will group on one line in the generated HTML documentation.
-#
-# Note that a value of 0 will completely suppress the enum values from appearing
-# in the overview section.
-# Minimum value: 0, maximum value: 20, default value: 4.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-ENUM_VALUES_PER_LINE   = 4
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
-# to set the initial width (in pixels) of the frame in which the tree is shown.
-# Minimum value: 0, maximum value: 1500, default value: 250.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-TREEVIEW_WIDTH         = 250
-
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
-# external symbols imported via tag files in a separate window.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-EXT_LINKS_IN_WINDOW    = NO
-
-# Use this tag to change the font size of LaTeX formulas included as images in
-# the HTML documentation. When you change the font size after a successful
-# doxygen run you need to manually remove any form_*.png images from the HTML
-# output directory to force them to be regenerated.
-# Minimum value: 8, maximum value: 50, default value: 10.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_FONTSIZE       = 10
-
-# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are not
-# supported properly for IE 6.0, but are supported on all modern browsers.
-#
-# Note that when changing this option you need to delete any form_*.png files in
-# the HTML output directory before the changes have effect.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_TRANSPARENT    = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
-# http://www.mathjax.org) which uses client side Javascript for the rendering
-# instead of using prerendered bitmaps. Use this if you do not have LaTeX
-# installed or if you want the formulas to look prettier in the HTML output.
-# When enabled you may also need to install MathJax separately and configure the
-# path to it using the MATHJAX_RELPATH option.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-USE_MATHJAX            = NO
-
-# When MathJax is enabled you can set the default output format to be used for
-# the MathJax output. See the MathJax site (see:
-# http://docs.mathjax.org/en/latest/output.html) for more details.
-# Possible values are: HTML-CSS (which is slower, but has the best
-# compatibility), NativeMML (i.e. MathML) and SVG.
-# The default value is: HTML-CSS.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_FORMAT         = HTML-CSS
-
-# When MathJax is enabled you need to specify the location relative to the HTML
-# output directory using the MATHJAX_RELPATH option. The destination directory
-# should contain the MathJax.js script. For instance, if the mathjax directory
-# is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
-# Content Delivery Network so you can quickly see the result without installing
-# MathJax. However, it is strongly recommended to install a local copy of
-# MathJax from http://www.mathjax.org before deployment.
-# The default value is: http://cdn.mathjax.org/mathjax/latest.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_RELPATH        = http://cdn.mathjax.org/mathjax/latest
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
-# extension names that should be enabled during MathJax rendering. For example
-# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_EXTENSIONS     =
-
-# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
-# of code that will be used on startup of the MathJax code. See the MathJax site
-# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
-# example see the documentation.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_CODEFILE       =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
-# the HTML output. The underlying search engine uses javascript and DHTML and
-# should work on any modern browser. Note that when using HTML help
-# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
-# there is already a search function so this one should typically be disabled.
-# For large projects the javascript based search engine can be slow, then
-# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
-# search using the keyboard; to jump to the search box use <access key> + S
-# (what the <access key> is depends on the OS and browser, but it is typically
-# <CTRL>, <ALT>/